
mm: zswap: add basic meminfo and vmstat coverage

Currently, figuring out the size and population of the zswap cache on a
host requires poking at debugfs.  There are no counters for reads and
writes against the cache.  As a result, it's difficult to understand
zswap behavior on production systems.

Print zswap memory consumption and how many pages are zswapped out in
/proc/meminfo.  Count zswapouts and zswapins in /proc/vmstat.

Link: https://lkml.kernel.org/r/20220510152847.230957-6-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Johannes Weiner, 2022-05-19 14:08:53 -07:00, committed by akpm
commit f6498b776d, parent b3fbd58fcb
6 changed files with 32 additions and 7 deletions
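For a quick look at the new interfaces from userspace, here is a small, illustrative reader (not part of the patch): it scans /proc/meminfo for the Zswap and Zswapped fields and /proc/vmstat for the zswpin and zswpout counters added below. The program and its helper grep_file() are made up for this sketch; only the file and field names come from the patch.

/* Illustrative only: print the zswap fields this patch exposes. */
#include <stdio.h>
#include <string.h>

static void grep_file(const char *path, const char *const *keys, int nkeys)
{
	char line[256];
	FILE *f = fopen(path, "r");
	int i;

	if (!f) {
		perror(path);
		return;
	}
	/* Print every line whose prefix matches one of the requested keys. */
	while (fgets(line, sizeof(line), f))
		for (i = 0; i < nkeys; i++)
			if (!strncmp(line, keys[i], strlen(keys[i])))
				printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	static const char *const meminfo_keys[] = { "Zswap:", "Zswapped:" };
	static const char *const vmstat_keys[] = { "zswpin ", "zswpout " };

	grep_file("/proc/meminfo", meminfo_keys, 2);
	grep_file("/proc/vmstat", vmstat_keys, 2);
	return 0;
}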

Documentation/filesystems/proc.rst

@@ -964,6 +964,8 @@ Example output. You may not have all of these fields.
 Mlocked: 0 kB
 SwapTotal: 0 kB
 SwapFree: 0 kB
+Zswap: 1904 kB
+Zswapped: 7792 kB
 Dirty: 12 kB
 Writeback: 0 kB
 AnonPages: 4654780 kB
@@ -1055,6 +1057,10 @@ SwapTotal
 SwapFree
 Memory which has been evicted from RAM, and is temporarily
 on the disk
+Zswap
+Memory consumed by the zswap backend (compressed size)
+Zswapped
+Amount of anonymous memory stored in zswap (original size)
 Dirty
 Memory which is waiting to get written back to the disk
 Writeback
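Read together, the two documented fields give the cache's effective compression ratio: Zswapped is the original size of the stored anonymous memory and Zswap is what it occupies after compression. In the example output above that is 7792 kB / 1904 kB, roughly 4.1:1.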

fs/proc/meminfo.c

@@ -86,6 +86,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "SwapTotal: ", i.totalswap);
 	show_val_kb(m, "SwapFree: ", i.freeswap);
+#ifdef CONFIG_ZSWAP
+	seq_printf(m, "Zswap: %8lu kB\n",
+		   (unsigned long)(zswap_pool_total_size >> 10));
+	seq_printf(m, "Zswapped: %8lu kB\n",
+		   (unsigned long)atomic_read(&zswap_stored_pages) <<
+		   (PAGE_SHIFT - 10));
+#endif
 	show_val_kb(m, "Dirty: ",
 		    global_node_page_state(NR_FILE_DIRTY));
 	show_val_kb(m, "Writeback: ",

include/linux/swap.h

@@ -620,6 +620,11 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
 }
 #endif
 
+#ifdef CONFIG_ZSWAP
+extern u64 zswap_pool_total_size;
+extern atomic_t zswap_stored_pages;
+#endif
+
 #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
 extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
 static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
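These declarations exist because the two counters, previously static to mm/zswap.c, are now read directly by meminfo_proc_show() in fs/proc/meminfo.c; the first mm/zswap.c hunk below drops the corresponding static qualifiers.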

include/linux/vm_event_item.h

@@ -136,6 +136,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_KSM
 		COW_KSM,
 #endif
+#ifdef CONFIG_ZSWAP
+		ZSWPIN,
+		ZSWPOUT,
+#endif
 #ifdef CONFIG_X86
 		DIRECT_MAP_LEVEL2_SPLIT,
 		DIRECT_MAP_LEVEL3_SPLIT,

mm/vmstat.c

@@ -1396,6 +1396,10 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_KSM
 	"cow_ksm",
 #endif
+#ifdef CONFIG_ZSWAP
+	"zswpin",
+	"zswpout",
+#endif
 #ifdef CONFIG_X86
 	"direct_map_level2_splits",
 	"direct_map_level3_splits",

mm/zswap.c

@@ -42,9 +42,9 @@
 * statistics
 **********************************/
 
 /* Total bytes used by the compressed storage */
-static u64 zswap_pool_total_size;
+u64 zswap_pool_total_size;
 /* The number of compressed pages currently stored in zswap */
-static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 /* The number of same-value filled pages currently stored in zswap */
 static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
@@ -1243,6 +1243,7 @@ insert_entry:
 	/* update stats */
 	atomic_inc(&zswap_stored_pages);
 	zswap_update_total_size();
+	count_vm_event(ZSWPOUT);
 
 	return 0;
@@ -1285,11 +1286,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 		zswap_fill_page(dst, entry->value);
 		kunmap_atomic(dst);
 		ret = 0;
-		goto freeentry;
+		goto stats;
 	}
 
 	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
-
 		tmp = kmalloc(entry->length, GFP_ATOMIC);
 		if (!tmp) {
 			ret = -ENOMEM;
@@ -1304,10 +1304,8 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 		src += sizeof(struct zswap_header);
 
 	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
-
 		memcpy(tmp, src, entry->length);
 		src = tmp;
-
 		zpool_unmap_handle(entry->pool->zpool, entry->handle);
 	}
@@ -1326,7 +1324,8 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 		kfree(tmp);
 
 	BUG_ON(ret);
-
+stats:
+	count_vm_event(ZSWPIN);
 freeentry:
 	spin_lock(&tree->lock);
 	zswap_entry_put(tree, entry);
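A note on the control flow in zswap_frontswap_load(): the new stats: label sits immediately above the ZSWPIN accounting, so the same-value-filled fast path, which skips decompression entirely, now jumps to stats: instead of freeentry: and is counted as well; both paths then fall through to freeentry: to drop the entry reference under the tree lock.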