// SPDX-License-Identifier: GPL-2.0-only
/*
 * zpool memory storage api
 *
 * Copyright (C) 2014 Dan Streetman
 *
 * This is a common frontend for memory storage pool implementations.
 * Typically, this is used to store compressed memory.
 */
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/zpool.h>
|
|
|
|
|
|
|
|
/*
 * A zpool binds a registered driver to one backend pool instance:
 * @driver is the implementation's operations table, and @pool is the
 * opaque per-pool handle returned by driver->create().
 */
struct zpool {
	struct zpool_driver *driver;	/* backing implementation's ops table */
	void *pool;			/* driver-private handle from ->create() */
};
|
|
|
|
|
|
|
|
/* All registered zpool_drivers; additions/removals guarded by drivers_lock. */
static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);
|
|
|
|
|
|
|
|
/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver: driver to register
 *
 * Makes @driver selectable by zpool_create_pool(): resets its usage
 * count to zero and links it onto the global driver list under
 * drivers_lock.
 */
void zpool_register_driver(struct zpool_driver *driver)
{
	spin_lock(&drivers_lock);
	/* Freshly registered: no pool references this driver yet. */
	atomic_set(&driver->refcount, 0);
	list_add(&driver->list, &drivers_head);
	spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* zpool_unregister_driver() - unregister a zpool implementation.
|
|
|
|
* @driver: driver to unregister.
|
|
|
|
*
|
|
|
|
* Module usage counting is used to prevent using a driver
|
|
|
|
* while/after unloading, so if this is called from module
|
|
|
|
* exit function, this should never fail; if called from
|
|
|
|
* other than the module exit function, and this returns
|
|
|
|
* failure, the driver is in use and must remain available.
|
|
|
|
*/
|
|
|
|
int zpool_unregister_driver(struct zpool_driver *driver)
|
|
|
|
{
|
|
|
|
int ret = 0, refcount;
|
|
|
|
|
|
|
|
spin_lock(&drivers_lock);
|
|
|
|
refcount = atomic_read(&driver->refcount);
|
|
|
|
WARN_ON(refcount < 0);
|
|
|
|
if (refcount > 0)
|
|
|
|
ret = -EBUSY;
|
|
|
|
else
|
|
|
|
list_del(&driver->list);
|
|
|
|
spin_unlock(&drivers_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(zpool_unregister_driver);
|
|
|
|
|
2015-11-06 17:29:18 -07:00
|
|
|
/* this assumes @type is null-terminated. */
|
2015-11-06 17:29:21 -07:00
|
|
|
static struct zpool_driver *zpool_get_driver(const char *type)
|
2014-08-06 16:08:36 -07:00
|
|
|
{
|
|
|
|
struct zpool_driver *driver;
|
|
|
|
|
|
|
|
spin_lock(&drivers_lock);
|
|
|
|
list_for_each_entry(driver, &drivers_head, list) {
|
|
|
|
if (!strcmp(driver->type, type)) {
|
|
|
|
bool got = try_module_get(driver->owner);
|
|
|
|
|
|
|
|
if (got)
|
|
|
|
atomic_inc(&driver->refcount);
|
|
|
|
spin_unlock(&drivers_lock);
|
|
|
|
return got ? driver : NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(&drivers_lock);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Drop the references taken by zpool_get_driver(): one on the driver's
 * usage count, then one on its owning module.  The refcount is dropped
 * first because module_put() may allow the module (and @driver with it)
 * to go away.
 */
static void zpool_put_driver(struct zpool_driver *driver)
{
	atomic_dec(&driver->refcount);
	module_put(driver->owner);
}
|
|
|
|
|
zpool: add zpool_has_pool()
This series makes creation of the zpool and compressor dynamic, so that
they can be changed at runtime. This makes using/configuring zswap
easier, as before this zswap had to be configured at boot time, using boot
params.
This uses a single list to track both the zpool and compressor together,
although Seth had mentioned an alternative which is to track the zpools
and compressors using separate lists. In the most common case, only a
single zpool and single compressor, using one list is slightly simpler
than using two lists, and for the uncommon case of multiple zpools and/or
compressors, using one list is slightly less simple (and uses slightly
more memory, probably) than using two lists.
This patch (of 4):
Add zpool_has_pool() function, indicating if the specified type of zpool
is available (i.e. zsmalloc or zbud). This allows checking if a pool is
available, without actually trying to allocate it, similar to
crypto_has_alg().
This is used by a following patch to zswap that enables the dynamic
runtime creation of zswap zpools.
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Acked-by: Seth Jennings <sjennings@variantweb.net>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-09-09 15:35:16 -07:00
|
|
|
/**
|
|
|
|
* zpool_has_pool() - Check if the pool driver is available
|
2018-02-06 16:42:13 -07:00
|
|
|
* @type: The type of the zpool to check (e.g. zbud, zsmalloc)
|
zpool: add zpool_has_pool()
This series makes creation of the zpool and compressor dynamic, so that
they can be changed at runtime. This makes using/configuring zswap
easier, as before this zswap had to be configured at boot time, using boot
params.
This uses a single list to track both the zpool and compressor together,
although Seth had mentioned an alternative which is to track the zpools
and compressors using separate lists. In the most common case, only a
single zpool and single compressor, using one list is slightly simpler
than using two lists, and for the uncommon case of multiple zpools and/or
compressors, using one list is slightly less simple (and uses slightly
more memory, probably) than using two lists.
This patch (of 4):
Add zpool_has_pool() function, indicating if the specified type of zpool
is available (i.e. zsmalloc or zbud). This allows checking if a pool is
available, without actually trying to allocate it, similar to
crypto_has_alg().
This is used by a following patch to zswap that enables the dynamic
runtime creation of zswap zpools.
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Acked-by: Seth Jennings <sjennings@variantweb.net>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-09-09 15:35:16 -07:00
|
|
|
*
|
|
|
|
* This checks if the @type pool driver is available. This will try to load
|
|
|
|
* the requested module, if needed, but there is no guarantee the module will
|
|
|
|
* still be loaded and available immediately after calling. If this returns
|
|
|
|
* true, the caller should assume the pool is available, but must be prepared
|
|
|
|
* to handle the @zpool_create_pool() returning failure. However if this
|
|
|
|
* returns false, the caller should assume the requested pool type is not
|
|
|
|
* available; either the requested pool type module does not exist, or could
|
|
|
|
* not be loaded, and calling @zpool_create_pool() with the pool type will
|
|
|
|
* fail.
|
|
|
|
*
|
2015-11-06 17:29:18 -07:00
|
|
|
* The @type string must be null-terminated.
|
|
|
|
*
|
zpool: add zpool_has_pool()
This series makes creation of the zpool and compressor dynamic, so that
they can be changed at runtime. This makes using/configuring zswap
easier, as before this zswap had to be configured at boot time, using boot
params.
This uses a single list to track both the zpool and compressor together,
although Seth had mentioned an alternative which is to track the zpools
and compressors using separate lists. In the most common case, only a
single zpool and single compressor, using one list is slightly simpler
than using two lists, and for the uncommon case of multiple zpools and/or
compressors, using one list is slightly less simple (and uses slightly
more memory, probably) than using two lists.
This patch (of 4):
Add zpool_has_pool() function, indicating if the specified type of zpool
is available (i.e. zsmalloc or zbud). This allows checking if a pool is
available, without actually trying to allocate it, similar to
crypto_has_alg().
This is used by a following patch to zswap that enables the dynamic
runtime creation of zswap zpools.
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Acked-by: Seth Jennings <sjennings@variantweb.net>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-09-09 15:35:16 -07:00
|
|
|
* Returns: true if @type pool is available, false if not
|
|
|
|
*/
|
|
|
|
bool zpool_has_pool(char *type)
|
|
|
|
{
|
|
|
|
struct zpool_driver *driver = zpool_get_driver(type);
|
|
|
|
|
|
|
|
if (!driver) {
|
|
|
|
request_module("zpool-%s", type);
|
|
|
|
driver = zpool_get_driver(type);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!driver)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
zpool_put_driver(driver);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(zpool_has_pool);
|
|
|
|
|
2014-08-06 16:08:36 -07:00
|
|
|
/**
|
|
|
|
* zpool_create_pool() - Create a new zpool
|
2018-02-06 16:42:13 -07:00
|
|
|
* @type: The type of the zpool to create (e.g. zbud, zsmalloc)
|
|
|
|
* @name: The name of the zpool (e.g. zram0, zswap)
|
|
|
|
* @gfp: The GFP flags to use when allocating the pool.
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
|
|
|
* This creates a new zpool of the specified type. The gfp flags will be
|
|
|
|
* used when allocating memory, if the implementation supports it. If the
|
2018-01-31 17:19:59 -07:00
|
|
|
* ops param is NULL, then the created zpool will not be evictable.
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
|
|
|
* Implementations must guarantee this to be thread-safe.
|
|
|
|
*
|
2015-11-06 17:29:18 -07:00
|
|
|
* The @type and @name strings must be null-terminated.
|
|
|
|
*
|
2014-08-06 16:08:36 -07:00
|
|
|
* Returns: New zpool on success, NULL on failure.
|
|
|
|
*/
|
2023-06-12 02:38:13 -07:00
|
|
|
struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp)
|
2014-08-06 16:08:36 -07:00
|
|
|
{
|
|
|
|
struct zpool_driver *driver;
|
|
|
|
struct zpool *zpool;
|
|
|
|
|
2015-06-25 15:00:37 -07:00
|
|
|
pr_debug("creating pool type %s\n", type);
|
2014-08-06 16:08:36 -07:00
|
|
|
|
|
|
|
driver = zpool_get_driver(type);
|
|
|
|
|
|
|
|
if (!driver) {
|
2014-08-29 15:18:40 -07:00
|
|
|
request_module("zpool-%s", type);
|
2014-08-06 16:08:36 -07:00
|
|
|
driver = zpool_get_driver(type);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!driver) {
|
|
|
|
pr_err("no driver for type %s\n", type);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
zpool = kmalloc(sizeof(*zpool), gfp);
|
|
|
|
if (!zpool) {
|
|
|
|
pr_err("couldn't create zpool - out of memory\n");
|
|
|
|
zpool_put_driver(driver);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
zpool->driver = driver;
|
2023-06-12 02:38:13 -07:00
|
|
|
zpool->pool = driver->create(name, gfp);
|
2014-08-06 16:08:36 -07:00
|
|
|
|
|
|
|
if (!zpool->pool) {
|
|
|
|
pr_err("couldn't create %s pool\n", type);
|
|
|
|
zpool_put_driver(driver);
|
|
|
|
kfree(zpool);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-06-25 15:00:37 -07:00
|
|
|
pr_debug("created pool type %s\n", type);
|
2014-08-06 16:08:36 -07:00
|
|
|
|
|
|
|
return zpool;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* zpool_destroy_pool() - Destroy a zpool
|
2018-02-06 16:42:16 -07:00
|
|
|
* @zpool: The zpool to destroy.
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
|
|
|
* Implementations must guarantee this to be thread-safe,
|
|
|
|
* however only when destroying different pools. The same
|
|
|
|
* pool should only be destroyed once, and should not be used
|
|
|
|
* after it is destroyed.
|
|
|
|
*
|
|
|
|
* This destroys an existing zpool. The zpool should not be in use.
|
|
|
|
*/
|
|
|
|
void zpool_destroy_pool(struct zpool *zpool)
|
|
|
|
{
|
2015-11-06 17:29:18 -07:00
|
|
|
pr_debug("destroying pool type %s\n", zpool->driver->type);
|
2014-08-06 16:08:36 -07:00
|
|
|
|
|
|
|
zpool->driver->destroy(zpool->pool);
|
|
|
|
zpool_put_driver(zpool->driver);
|
|
|
|
kfree(zpool);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* zpool_get_type() - Get the type of the zpool
|
2018-02-06 16:42:16 -07:00
|
|
|
* @zpool: The zpool to check
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
|
|
|
* This returns the type of the pool.
|
|
|
|
*
|
|
|
|
* Implementations must guarantee this to be thread-safe.
|
|
|
|
*
|
|
|
|
* Returns: The type of zpool.
|
|
|
|
*/
|
2015-11-06 17:29:18 -07:00
|
|
|
const char *zpool_get_type(struct zpool *zpool)
|
2014-08-06 16:08:36 -07:00
|
|
|
{
|
2015-11-06 17:29:18 -07:00
|
|
|
return zpool->driver->type;
|
2014-08-06 16:08:36 -07:00
|
|
|
}
|
|
|
|
|
2019-09-23 15:39:37 -07:00
|
|
|
/**
|
2020-08-11 18:33:28 -07:00
|
|
|
* zpool_malloc_support_movable() - Check if the zpool supports
|
|
|
|
* allocating movable memory
|
2019-09-23 15:39:37 -07:00
|
|
|
* @zpool: The zpool to check
|
|
|
|
*
|
2020-08-11 18:33:28 -07:00
|
|
|
* This returns if the zpool supports allocating movable memory.
|
2019-09-23 15:39:37 -07:00
|
|
|
*
|
|
|
|
* Implementations must guarantee this to be thread-safe.
|
|
|
|
*
|
2020-08-11 18:33:28 -07:00
|
|
|
* Returns: true if the zpool supports allocating movable memory, false if not
|
2019-09-23 15:39:37 -07:00
|
|
|
*/
|
|
|
|
bool zpool_malloc_support_movable(struct zpool *zpool)
|
|
|
|
{
|
|
|
|
return zpool->driver->malloc_support_movable;
|
|
|
|
}
|
|
|
|
|
2014-08-06 16:08:36 -07:00
|
|
|
/**
|
|
|
|
* zpool_malloc() - Allocate memory
|
2018-02-06 16:42:16 -07:00
|
|
|
* @zpool: The zpool to allocate from.
|
2018-02-06 16:42:13 -07:00
|
|
|
* @size: The amount of memory to allocate.
|
|
|
|
* @gfp: The GFP flags to use when allocating memory.
|
|
|
|
* @handle: Pointer to the handle to set
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
|
|
|
* This allocates the requested amount of memory from the pool.
|
|
|
|
* The gfp flags will be used when allocating memory, if the
|
|
|
|
* implementation supports it. The provided @handle will be
|
|
|
|
* set to the allocated object handle.
|
|
|
|
*
|
|
|
|
* Implementations must guarantee this to be thread-safe.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success, negative value on error.
|
|
|
|
*/
|
|
|
|
int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
|
|
|
|
unsigned long *handle)
|
|
|
|
{
|
|
|
|
return zpool->driver->malloc(zpool->pool, size, gfp, handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* zpool_free() - Free previously allocated memory
|
2018-02-06 16:42:16 -07:00
|
|
|
* @zpool: The zpool that allocated the memory.
|
2018-02-06 16:42:13 -07:00
|
|
|
* @handle: The handle to the memory to free.
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
|
|
|
* This frees previously allocated memory. This does not guarantee
|
|
|
|
* that the pool will actually free memory, only that the memory
|
|
|
|
* in the pool will become available for use by the pool.
|
|
|
|
*
|
|
|
|
* Implementations must guarantee this to be thread-safe,
|
|
|
|
* however only when freeing different handles. The same
|
|
|
|
* handle should only be freed once, and should not be used
|
|
|
|
* after freeing.
|
|
|
|
*/
|
|
|
|
void zpool_free(struct zpool *zpool, unsigned long handle)
|
|
|
|
{
|
|
|
|
zpool->driver->free(zpool->pool, handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* zpool_map_handle() - Map a previously allocated handle into memory
|
2018-02-06 16:42:16 -07:00
|
|
|
* @zpool: The zpool that the handle was allocated from
|
2018-02-06 16:42:13 -07:00
|
|
|
* @handle: The handle to map
|
2018-02-06 16:42:16 -07:00
|
|
|
* @mapmode: How the memory should be mapped
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
2018-02-06 16:42:16 -07:00
|
|
|
* This maps a previously allocated handle into memory. The @mapmode
|
2014-08-06 16:08:36 -07:00
|
|
|
* param indicates to the implementation how the memory will be
|
|
|
|
* used, i.e. read-only, write-only, read-write. If the
|
|
|
|
* implementation does not support it, the memory will be treated
|
|
|
|
* as read-write.
|
|
|
|
*
|
|
|
|
* This may hold locks, disable interrupts, and/or preemption,
|
|
|
|
* and the zpool_unmap_handle() must be called to undo those
|
|
|
|
* actions. The code that uses the mapped handle should complete
|
2021-05-06 18:06:47 -07:00
|
|
|
* its operations on the mapped handle memory quickly and unmap
|
2014-08-06 16:08:36 -07:00
|
|
|
* as soon as possible. As the implementation may use per-cpu
|
|
|
|
* data, multiple handles should not be mapped concurrently on
|
|
|
|
* any cpu.
|
|
|
|
*
|
|
|
|
* Returns: A pointer to the handle's mapped memory area.
|
|
|
|
*/
|
|
|
|
void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
|
|
|
|
enum zpool_mapmode mapmode)
|
|
|
|
{
|
|
|
|
return zpool->driver->map(zpool->pool, handle, mapmode);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* zpool_unmap_handle() - Unmap a previously mapped handle
|
2018-02-06 16:42:16 -07:00
|
|
|
* @zpool: The zpool that the handle was allocated from
|
2018-02-06 16:42:13 -07:00
|
|
|
* @handle: The handle to unmap
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
|
|
|
* This unmaps a previously mapped handle. Any locks or other
|
|
|
|
* actions that the implementation took in zpool_map_handle()
|
|
|
|
* will be undone here. The memory area returned from
|
|
|
|
* zpool_map_handle() should no longer be used after this.
|
|
|
|
*/
|
|
|
|
void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
|
|
|
|
{
|
|
|
|
zpool->driver->unmap(zpool->pool, handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2024-03-12 08:34:12 -07:00
|
|
|
* zpool_get_total_pages() - The total size of the pool
|
2018-02-06 16:42:16 -07:00
|
|
|
* @zpool: The zpool to check
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
2024-03-12 08:34:12 -07:00
|
|
|
* This returns the total size in pages of the pool.
|
2014-08-06 16:08:36 -07:00
|
|
|
*
|
2024-03-12 08:34:12 -07:00
|
|
|
* Returns: Total size of the zpool in pages.
|
2014-08-06 16:08:36 -07:00
|
|
|
*/
|
2024-03-12 08:34:12 -07:00
|
|
|
u64 zpool_get_total_pages(struct zpool *zpool)
|
2014-08-06 16:08:36 -07:00
|
|
|
{
|
2024-03-12 08:34:12 -07:00
|
|
|
return zpool->driver->total_pages(zpool->pool);
|
2014-08-06 16:08:36 -07:00
|
|
|
}
|
|
|
|
|
mm/zswap: add the flag can_sleep_mapped
Patch series "Fix the compatibility of zsmalloc and zswap".
Patch #1 adds a flag to zpool, then zswap used to determine if zpool
drivers such as zbud/z3fold/zsmalloc will enter an atomic context after
mapping.
The difference between zbud/z3fold and zsmalloc is that zsmalloc requires
an atomic context that since its map function holds a preempt-disabled,
but zbud/z3fold don't require an atomic context. So patch #2 sets flag
sleep_mapped to true indicating that zbud/z3fold can sleep after mapping.
zsmalloc didn't support sleep after mapping, so don't set that flag to
true.
This patch (of 2):
Add a flag to zpool, named is "can_sleep_mapped", and have it set true for
zbud/z3fold, not set this flag for zsmalloc, so its default value is
false. Then zswap could go the current path if the flag is true; and if
it's false, copy data from src to a temporary buffer, then unmap the
handle, take the mutex, process the buffer instead of src to avoid
sleeping function called from atomic context.
[natechancellor@gmail.com: add return value in zswap_frontswap_load]
Link: https://lkml.kernel.org/r/20210121214804.926843-1-natechancellor@gmail.com
[tiantao6@hisilicon.com: fix potential memory leak]
Link: https://lkml.kernel.org/r/1611538365-51811-1-git-send-email-tiantao6@hisilicon.com
[colin.king@canonical.com: fix potential uninitialized pointer read on tmp]
Link: https://lkml.kernel.org/r/20210128141728.639030-1-colin.king@canonical.com
[tiantao6@hisilicon.com: fix variable 'entry' is uninitialized when used]
Link: https://lkml.kernel.org/r/1611223030-58346-1-git-send-email-tiantao6@hisilicon.comLink: https://lkml.kernel.org/r/1611035683-12732-1-git-send-email-tiantao6@hisilicon.com
Link: https://lkml.kernel.org/r/1611035683-12732-2-git-send-email-tiantao6@hisilicon.com
Signed-off-by: Tian Tao <tiantao6@hisilicon.com>
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Reviewed-by: Vitaly Wool <vitaly.wool@konsulko.com>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reported-by: Mike Galbraith <efault@gmx.de>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2021-02-25 18:18:17 -07:00
|
|
|
/**
|
|
|
|
* zpool_can_sleep_mapped - Test if zpool can sleep when do mapped.
|
|
|
|
* @zpool: The zpool to test
|
|
|
|
*
|
2022-11-21 20:30:22 -07:00
|
|
|
* Some allocators enter non-preemptible context in ->map() callback (e.g.
|
|
|
|
* disable pagefaults) and exit that context in ->unmap(), which limits what
|
|
|
|
* we can do with the mapped object. For instance, we cannot wait for
|
|
|
|
* asynchronous crypto API to decompress such an object or take mutexes
|
|
|
|
* since those will call into the scheduler. This function tells us whether
|
|
|
|
* we use such an allocator.
|
|
|
|
*
|
mm/zswap: add the flag can_sleep_mapped
Patch series "Fix the compatibility of zsmalloc and zswap".
Patch #1 adds a flag to zpool, then zswap used to determine if zpool
drivers such as zbud/z3fold/zsmalloc will enter an atomic context after
mapping.
The difference between zbud/z3fold and zsmalloc is that zsmalloc requires
an atomic context that since its map function holds a preempt-disabled,
but zbud/z3fold don't require an atomic context. So patch #2 sets flag
sleep_mapped to true indicating that zbud/z3fold can sleep after mapping.
zsmalloc didn't support sleep after mapping, so don't set that flag to
true.
This patch (of 2):
Add a flag to zpool, named is "can_sleep_mapped", and have it set true for
zbud/z3fold, not set this flag for zsmalloc, so its default value is
false. Then zswap could go the current path if the flag is true; and if
it's false, copy data from src to a temporary buffer, then unmap the
handle, take the mutex, process the buffer instead of src to avoid
sleeping function called from atomic context.
[natechancellor@gmail.com: add return value in zswap_frontswap_load]
Link: https://lkml.kernel.org/r/20210121214804.926843-1-natechancellor@gmail.com
[tiantao6@hisilicon.com: fix potential memory leak]
Link: https://lkml.kernel.org/r/1611538365-51811-1-git-send-email-tiantao6@hisilicon.com
[colin.king@canonical.com: fix potential uninitialized pointer read on tmp]
Link: https://lkml.kernel.org/r/20210128141728.639030-1-colin.king@canonical.com
[tiantao6@hisilicon.com: fix variable 'entry' is uninitialized when used]
Link: https://lkml.kernel.org/r/1611223030-58346-1-git-send-email-tiantao6@hisilicon.comLink: https://lkml.kernel.org/r/1611035683-12732-1-git-send-email-tiantao6@hisilicon.com
Link: https://lkml.kernel.org/r/1611035683-12732-2-git-send-email-tiantao6@hisilicon.com
Signed-off-by: Tian Tao <tiantao6@hisilicon.com>
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Reviewed-by: Vitaly Wool <vitaly.wool@konsulko.com>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reported-by: Mike Galbraith <efault@gmx.de>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2021-02-25 18:18:17 -07:00
|
|
|
* Returns: true if zpool can sleep; false otherwise.
|
|
|
|
*/
|
|
|
|
bool zpool_can_sleep_mapped(struct zpool *zpool)
|
|
|
|
{
|
2022-11-28 12:16:11 -07:00
|
|
|
return zpool->driver->sleep_mapped;
|
mm/zswap: add the flag can_sleep_mapped
Patch series "Fix the compatibility of zsmalloc and zswap".
Patch #1 adds a flag to zpool, then zswap used to determine if zpool
drivers such as zbud/z3fold/zsmalloc will enter an atomic context after
mapping.
The difference between zbud/z3fold and zsmalloc is that zsmalloc requires
an atomic context that since its map function holds a preempt-disabled,
but zbud/z3fold don't require an atomic context. So patch #2 sets flag
sleep_mapped to true indicating that zbud/z3fold can sleep after mapping.
zsmalloc didn't support sleep after mapping, so don't set that flag to
true.
This patch (of 2):
Add a flag to zpool, named is "can_sleep_mapped", and have it set true for
zbud/z3fold, not set this flag for zsmalloc, so its default value is
false. Then zswap could go the current path if the flag is true; and if
it's false, copy data from src to a temporary buffer, then unmap the
handle, take the mutex, process the buffer instead of src to avoid
sleeping function called from atomic context.
[natechancellor@gmail.com: add return value in zswap_frontswap_load]
Link: https://lkml.kernel.org/r/20210121214804.926843-1-natechancellor@gmail.com
[tiantao6@hisilicon.com: fix potential memory leak]
Link: https://lkml.kernel.org/r/1611538365-51811-1-git-send-email-tiantao6@hisilicon.com
[colin.king@canonical.com: fix potential uninitialized pointer read on tmp]
Link: https://lkml.kernel.org/r/20210128141728.639030-1-colin.king@canonical.com
[tiantao6@hisilicon.com: fix variable 'entry' is uninitialized when used]
Link: https://lkml.kernel.org/r/1611223030-58346-1-git-send-email-tiantao6@hisilicon.comLink: https://lkml.kernel.org/r/1611035683-12732-1-git-send-email-tiantao6@hisilicon.com
Link: https://lkml.kernel.org/r/1611035683-12732-2-git-send-email-tiantao6@hisilicon.com
Signed-off-by: Tian Tao <tiantao6@hisilicon.com>
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Reviewed-by: Vitaly Wool <vitaly.wool@konsulko.com>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reported-by: Mike Galbraith <efault@gmx.de>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2021-02-25 18:18:17 -07:00
|
|
|
}
|
|
|
|
|
2014-08-06 16:08:36 -07:00
|
|
|
/* Module metadata for the zpool frontend. */
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");
|