2022-04-25 05:21:33 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
|
|
/*
|
|
|
|
* Copyright (C) 2022, Alibaba Cloud
|
2022-09-17 21:34:53 -07:00
|
|
|
* Copyright (C) 2022, Bytedance Inc. All rights reserved.
|
2022-04-25 05:21:33 -07:00
|
|
|
*/
|
erofs: fix lockdep false positives on initializing erofs_pseudo_mnt
Lockdep reported the following issue when mounting erofs with a domain_id:
============================================
WARNING: possible recursive locking detected
6.8.0-rc7-xfstests #521 Not tainted
--------------------------------------------
mount/396 is trying to acquire lock:
ffff907a8aaaa0e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
but task is already holding lock:
ffff907a8aaa90e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&type->s_umount_key#50/1);
lock(&type->s_umount_key#50/1);
*** DEADLOCK ***
May be due to missing lock nesting notation
2 locks held by mount/396:
#0: ffff907a8aaa90e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
#1: ffffffffc00e6f28 (erofs_domain_list_lock){+.+.}-{3:3},
at: erofs_fscache_register_fs+0x3d/0x270 [erofs]
stack backtrace:
CPU: 1 PID: 396 Comm: mount Not tainted 6.8.0-rc7-xfstests #521
Call Trace:
<TASK>
dump_stack_lvl+0x64/0xb0
validate_chain+0x5c4/0xa00
__lock_acquire+0x6a9/0xd50
lock_acquire+0xcd/0x2b0
down_write_nested+0x45/0xd0
alloc_super+0xe3/0x3d0
sget_fc+0x62/0x2f0
vfs_get_super+0x21/0x90
vfs_get_tree+0x2c/0xf0
fc_mount+0x12/0x40
vfs_kern_mount.part.0+0x75/0x90
kern_mount+0x24/0x40
erofs_fscache_register_fs+0x1ef/0x270 [erofs]
erofs_fc_fill_super+0x213/0x380 [erofs]
This is because the file_system_type of both erofs and the pseudo-mount
point of domain_id is erofs_fs_type, so two successive calls to
alloc_super() are considered to be using the same lock and trigger the
warning above.
Therefore add a nodev file_system_type called erofs_anon_fs_type in
fscache.c to silence this complaint. Because kern_mount() takes a
pointer to struct file_system_type, not its (string) name. So we don't
need to call register_filesystem(). In addition, call init_pseudo() in
erofs_anon_init_fs_context() as suggested by Al Viro, so that we can
remove erofs_fc_fill_pseudo_super(), erofs_fc_anon_get_tree(), and
erofs_anon_context_ops.
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Fixes: a9849560c55e ("erofs: introduce a pseudo mnt to manage shared cookies")
Signed-off-by: Baokun Li <libaokun1@huawei.com>
Reviewed-and-tested-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Yang Erkun <yangerkun@huawei.com>
Link: https://lore.kernel.org/r/20240307101018.2021925-1-libaokun1@huawei.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-07 03:10:18 -07:00
|
|
|
#include <linux/pseudo_fs.h>
|
2022-04-25 05:21:33 -07:00
|
|
|
#include <linux/fscache.h>
|
|
|
|
#include "internal.h"
|
|
|
|
|
2022-09-17 21:34:53 -07:00
|
|
|
/* Serializes additions/removals on erofs_domain_list */
static DEFINE_MUTEX(erofs_domain_list_lock);
/* Serializes additions/removals on erofs_domain_cookies_list */
static DEFINE_MUTEX(erofs_domain_cookies_lock);
static LIST_HEAD(erofs_domain_list);
static LIST_HEAD(erofs_domain_cookies_list);
/* Pseudo mount hosting anonymous inodes for shared domain cookies */
static struct vfsmount *erofs_pseudo_mnt;
|
2022-09-17 21:34:53 -07:00
|
|
|
|
erofs: fix lockdep false positives on initializing erofs_pseudo_mnt
Lockdep reported the following issue when mounting erofs with a domain_id:
============================================
WARNING: possible recursive locking detected
6.8.0-rc7-xfstests #521 Not tainted
--------------------------------------------
mount/396 is trying to acquire lock:
ffff907a8aaaa0e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
but task is already holding lock:
ffff907a8aaa90e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&type->s_umount_key#50/1);
lock(&type->s_umount_key#50/1);
*** DEADLOCK ***
May be due to missing lock nesting notation
2 locks held by mount/396:
#0: ffff907a8aaa90e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
#1: ffffffffc00e6f28 (erofs_domain_list_lock){+.+.}-{3:3},
at: erofs_fscache_register_fs+0x3d/0x270 [erofs]
stack backtrace:
CPU: 1 PID: 396 Comm: mount Not tainted 6.8.0-rc7-xfstests #521
Call Trace:
<TASK>
dump_stack_lvl+0x64/0xb0
validate_chain+0x5c4/0xa00
__lock_acquire+0x6a9/0xd50
lock_acquire+0xcd/0x2b0
down_write_nested+0x45/0xd0
alloc_super+0xe3/0x3d0
sget_fc+0x62/0x2f0
vfs_get_super+0x21/0x90
vfs_get_tree+0x2c/0xf0
fc_mount+0x12/0x40
vfs_kern_mount.part.0+0x75/0x90
kern_mount+0x24/0x40
erofs_fscache_register_fs+0x1ef/0x270 [erofs]
erofs_fc_fill_super+0x213/0x380 [erofs]
This is because the file_system_type of both erofs and the pseudo-mount
point of domain_id is erofs_fs_type, so two successive calls to
alloc_super() are considered to be using the same lock and trigger the
warning above.
Therefore add a nodev file_system_type called erofs_anon_fs_type in
fscache.c to silence this complaint. Because kern_mount() takes a
pointer to struct file_system_type, not its (string) name. So we don't
need to call register_filesystem(). In addition, call init_pseudo() in
erofs_anon_init_fs_context() as suggested by Al Viro, so that we can
remove erofs_fc_fill_pseudo_super(), erofs_fc_anon_get_tree(), and
erofs_anon_context_ops.
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Fixes: a9849560c55e ("erofs: introduce a pseudo mnt to manage shared cookies")
Signed-off-by: Baokun Li <libaokun1@huawei.com>
Reviewed-and-tested-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Yang Erkun <yangerkun@huawei.com>
Link: https://lore.kernel.org/r/20240307101018.2021925-1-libaokun1@huawei.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-07 03:10:18 -07:00
|
|
|
static int erofs_anon_init_fs_context(struct fs_context *fc)
|
|
|
|
{
|
|
|
|
return init_pseudo(fc, EROFS_SUPER_MAGIC) ? 0 : -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Anonymous file_system_type backing the pseudo mount for shared domain
 * cookies.  It is deliberately distinct from erofs_fs_type so that lockdep
 * assigns it its own s_umount key and does not report false-positive
 * recursive locking when the pseudo mount is created while an erofs super
 * is being set up.  kern_mount() takes the struct pointer directly, so
 * register_filesystem() is not needed.
 */
static struct file_system_type erofs_anon_fs_type = {
	.owner = THIS_MODULE,
	.name           = "pseudo_erofs",
	.init_fs_context = erofs_anon_init_fs_context,
	.kill_sb        = kill_anon_super,
};
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
/*
 * A generic read request against fscache.  The caller describes the target
 * buffer with @iter; when the read terminates, @end_io is invoked with
 * @private to do further handling (e.g. unlocking page cache folios for
 * uncompressed data, or decompressing the raw data just read).
 */
struct erofs_fscache_io {
	struct netfs_cache_resources cres;
	struct iov_iter iter;
	netfs_io_terminated_t end_io;	/* completion callback */
	void *private;			/* opaque data handed to end_io */
	refcount_t ref;
};
|
|
|
|
|
|
|
|
/*
 * A request to fill the page cache over the byte range
 * [start, start + len) of @mapping.  One or more erofs_fscache_io reads
 * may be issued on its behalf, each holding a reference; completion
 * (folio uptodate/unlock processing) happens when the last reference is
 * dropped.
 */
struct erofs_fscache_rq {
	struct address_space *mapping;	/* The mapping being accessed */
	loff_t start;			/* Start position */
	size_t len;			/* Length of the request */
	size_t submitted;		/* Length of submitted */
	short error;			/* 0 or error that occurred */
	refcount_t ref;
};
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
static bool erofs_fscache_io_put(struct erofs_fscache_io *io)
|
2022-05-09 00:40:28 -07:00
|
|
|
{
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
if (!refcount_dec_and_test(&io->ref))
|
|
|
|
return false;
|
|
|
|
if (io->cres.ops)
|
|
|
|
io->cres.ops->end_operation(&io->cres);
|
|
|
|
kfree(io);
|
|
|
|
return true;
|
2022-05-09 00:40:28 -07:00
|
|
|
}
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
static void erofs_fscache_req_complete(struct erofs_fscache_rq *req)
|
2022-05-09 00:40:28 -07:00
|
|
|
{
|
|
|
|
struct folio *folio;
|
2022-11-23 20:42:12 -07:00
|
|
|
bool failed = req->error;
|
|
|
|
pgoff_t start_page = req->start / PAGE_SIZE;
|
|
|
|
pgoff_t last_page = ((req->start + req->len) / PAGE_SIZE) - 1;
|
2022-05-09 00:40:28 -07:00
|
|
|
|
2022-11-23 20:42:12 -07:00
|
|
|
XA_STATE(xas, &req->mapping->i_pages, start_page);
|
2022-05-09 00:40:28 -07:00
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
xas_for_each(&xas, folio, last_page) {
|
2022-11-14 05:19:43 -07:00
|
|
|
if (xas_retry(&xas, folio))
|
|
|
|
continue;
|
2022-11-23 20:42:12 -07:00
|
|
|
if (!failed)
|
2022-05-09 00:40:28 -07:00
|
|
|
folio_mark_uptodate(folio);
|
|
|
|
folio_unlock(folio);
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
static void erofs_fscache_req_put(struct erofs_fscache_rq *req)
|
2022-05-09 00:40:28 -07:00
|
|
|
{
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
if (!refcount_dec_and_test(&req->ref))
|
|
|
|
return;
|
|
|
|
erofs_fscache_req_complete(req);
|
|
|
|
kfree(req);
|
2022-05-09 00:40:28 -07:00
|
|
|
}
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
static struct erofs_fscache_rq *erofs_fscache_req_alloc(struct address_space *mapping,
|
|
|
|
loff_t start, size_t len)
|
|
|
|
{
|
|
|
|
struct erofs_fscache_rq *req = kzalloc(sizeof(*req), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!req)
|
|
|
|
return NULL;
|
|
|
|
req->mapping = mapping;
|
|
|
|
req->start = start;
|
|
|
|
req->len = len;
|
|
|
|
refcount_set(&req->ref, 1);
|
|
|
|
return req;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void erofs_fscache_req_io_put(struct erofs_fscache_io *io)
|
|
|
|
{
|
|
|
|
struct erofs_fscache_rq *req = io->private;
|
|
|
|
|
|
|
|
if (erofs_fscache_io_put(io))
|
|
|
|
erofs_fscache_req_put(req);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void erofs_fscache_req_end_io(void *priv,
|
2022-05-09 00:40:28 -07:00
|
|
|
ssize_t transferred_or_error, bool was_async)
|
|
|
|
{
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct erofs_fscache_io *io = priv;
|
|
|
|
struct erofs_fscache_rq *req = io->private;
|
2022-05-09 00:40:28 -07:00
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
if (IS_ERR_VALUE(transferred_or_error))
|
|
|
|
req->error = transferred_or_error;
|
|
|
|
erofs_fscache_req_io_put(io);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct erofs_fscache_io *erofs_fscache_req_io_alloc(struct erofs_fscache_rq *req)
|
|
|
|
{
|
|
|
|
struct erofs_fscache_io *io = kzalloc(sizeof(*io), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!io)
|
|
|
|
return NULL;
|
|
|
|
io->end_io = erofs_fscache_req_end_io;
|
|
|
|
io->private = req;
|
|
|
|
refcount_inc(&req->ref);
|
|
|
|
refcount_set(&io->ref, 1);
|
|
|
|
return io;
|
2022-05-09 00:40:28 -07:00
|
|
|
}
|
|
|
|
|
2022-04-25 05:21:36 -07:00
|
|
|
/*
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
* Read data from fscache described by cookie at pstart physical address
|
|
|
|
* offset, and fill the read data into buffer described by io->iter.
|
2022-04-25 05:21:36 -07:00
|
|
|
*/
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
|
|
|
|
loff_t pstart, struct erofs_fscache_io *io)
|
2022-04-25 05:21:36 -07:00
|
|
|
{
|
|
|
|
enum netfs_io_source source;
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct netfs_cache_resources *cres = &io->cres;
|
|
|
|
struct iov_iter *iter = &io->iter;
|
2022-04-25 05:21:36 -07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = fscache_begin_read_operation(cres, cookie);
|
|
|
|
if (ret)
|
2022-11-23 20:42:12 -07:00
|
|
|
return ret;
|
2022-04-25 05:21:36 -07:00
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
while (iov_iter_count(iter)) {
|
|
|
|
size_t orig_count = iov_iter_count(iter), len = orig_count;
|
2022-11-23 20:42:12 -07:00
|
|
|
unsigned long flags = 1 << NETFS_SREQ_ONDEMAND;
|
2022-05-09 00:40:28 -07:00
|
|
|
|
2022-11-23 20:42:12 -07:00
|
|
|
source = cres->ops->prepare_ondemand_read(cres,
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
pstart, &len, LLONG_MAX, &flags, 0);
|
|
|
|
if (WARN_ON(len == 0))
|
2022-04-25 05:21:36 -07:00
|
|
|
source = NETFS_INVALID_READ;
|
|
|
|
if (source != NETFS_READ_FROM_CACHE) {
|
2024-04-24 01:42:47 -07:00
|
|
|
erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source);
|
2022-11-23 20:42:12 -07:00
|
|
|
return -EIO;
|
2022-04-25 05:21:36 -07:00
|
|
|
}
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
iov_iter_truncate(iter, len);
|
|
|
|
refcount_inc(&io->ref);
|
|
|
|
ret = fscache_read(cres, pstart, iter, NETFS_READ_HOLE_FAIL,
|
|
|
|
io->end_io, io);
|
2022-05-09 00:40:28 -07:00
|
|
|
if (ret == -EIOCBQUEUED)
|
|
|
|
ret = 0;
|
2022-04-25 05:21:36 -07:00
|
|
|
if (ret) {
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
erofs_err(NULL, "fscache_read failed (ret %d)", ret);
|
2022-11-23 20:42:12 -07:00
|
|
|
return ret;
|
2022-04-25 05:21:36 -07:00
|
|
|
}
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
if (WARN_ON(iov_iter_count(iter)))
|
|
|
|
return -EIO;
|
2022-04-25 05:21:36 -07:00
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
iov_iter_reexpand(iter, orig_count - len);
|
|
|
|
pstart += len;
|
2022-04-25 05:21:36 -07:00
|
|
|
}
|
2022-11-23 20:42:12 -07:00
|
|
|
return 0;
|
2022-04-25 05:21:36 -07:00
|
|
|
}
|
|
|
|
|
2024-03-08 02:41:59 -07:00
|
|
|
/*
 * A raw fscache read request carried by a bio: the caller fills the bio's
 * bvecs as usual, while the data is actually fetched via fscache_read().
 *
 * @io must remain the first member (asserted by the BUILD_BUG_ON in
 * erofs_fscache_bio_endio()) — presumably so that dropping the last
 * reference on &io frees the whole containing object; confirm against
 * erofs_fscache_io_put().
 */
struct erofs_fscache_bio {
	struct erofs_fscache_io io;
	struct bio bio;		/* w/o bdev to share bio_add_page/endio() */
	struct bio_vec bvecs[BIO_MAX_VECS];
};
|
|
|
|
|
|
|
|
/*
 * Completion callback of the async fscache read backing a bio: propagate a
 * negative result to the bio's status, complete the bio, then drop the I/O
 * reference taken for the read (which may free the erofs_fscache_bio).
 */
static void erofs_fscache_bio_endio(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct erofs_fscache_bio *io = priv;

	/* transferred_or_error is a byte count on success, -errno on failure */
	if (IS_ERR_VALUE(transferred_or_error))
		io->bio.bi_status = errno_to_blk_status(transferred_or_error);
	io->bio.bi_end_io(&io->bio);
	/* the put below relies on &io->io aliasing the containing object */
	BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0);
	erofs_fscache_io_put(&io->io);
}
|
|
|
|
|
|
|
|
struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
|
|
|
|
{
|
|
|
|
struct erofs_fscache_bio *io;
|
|
|
|
|
|
|
|
io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
|
|
|
|
bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
|
|
|
|
io->io.private = mdev->m_fscache->cookie;
|
|
|
|
io->io.end_io = erofs_fscache_bio_endio;
|
|
|
|
refcount_set(&io->io.ref, 1);
|
|
|
|
return &io->bio;
|
|
|
|
}
|
|
|
|
|
|
|
|
void erofs_fscache_submit_bio(struct bio *bio)
|
|
|
|
{
|
|
|
|
struct erofs_fscache_bio *io = container_of(bio,
|
|
|
|
struct erofs_fscache_bio, bio);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt,
|
|
|
|
bio->bi_iter.bi_size);
|
|
|
|
ret = erofs_fscache_read_io_async(io->io.private,
|
|
|
|
bio->bi_iter.bi_sector << 9, &io->io);
|
|
|
|
erofs_fscache_io_put(&io->io);
|
|
|
|
if (!ret)
|
|
|
|
return;
|
|
|
|
bio->bi_status = errno_to_blk_status(ret);
|
|
|
|
bio->bi_end_io(bio);
|
|
|
|
}
|
|
|
|
|
2022-05-24 19:55:07 -07:00
|
|
|
/*
 * read_folio() for the pseudo inode backing cached metadata: fill @folio
 * straight from fscache.  @data is unused.
 *
 * Errors are propagated through req->error so the request teardown
 * (erofs_fscache_req_put(), not visible here) can observe them; presumably
 * that is also where the folio gets unlocked on the non-early-error paths —
 * TODO confirm against the req_alloc/req_put helpers.
 */
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
	/* the erofs_fscache context was stashed in i_private by the inode setup */
	struct erofs_fscache *ctx = folio->mapping->host->i_private;
	int ret = -ENOMEM;
	struct erofs_fscache_rq *req;
	struct erofs_fscache_io *io;

	req = erofs_fscache_req_alloc(folio->mapping,
				folio_pos(folio), folio_size(folio));
	if (!req) {
		/* no request object to carry the error: unlock the folio here */
		folio_unlock(folio);
		return ret;
	}

	io = erofs_fscache_req_io_alloc(req);
	if (!io) {
		req->error = ret;
		goto out;
	}
	/* direct the raw read at the folio's own slot in the page cache */
	iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages,
			folio_pos(folio), folio_size(folio));

	ret = erofs_fscache_read_io_async(ctx->cookie, folio_pos(folio), io);
	if (ret)
		req->error = ret;

	erofs_fscache_req_io_put(io);
out:
	erofs_fscache_req_put(req);
	return ret;
}
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
|
2022-04-25 05:21:40 -07:00
|
|
|
{
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct address_space *mapping = req->mapping;
|
2022-09-21 23:24:14 -07:00
|
|
|
struct inode *inode = mapping->host;
|
2022-04-25 05:21:40 -07:00
|
|
|
struct super_block *sb = inode->i_sb;
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct erofs_fscache_io *io;
|
2022-04-25 05:21:40 -07:00
|
|
|
struct erofs_map_blocks map;
|
|
|
|
struct erofs_map_dev mdev;
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
loff_t pos = req->start + req->submitted;
|
2022-09-21 23:24:14 -07:00
|
|
|
size_t count;
|
2022-04-25 05:21:40 -07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
map.m_la = pos;
|
2023-02-08 19:48:25 -07:00
|
|
|
ret = erofs_map_blocks(inode, &map);
|
2022-04-25 05:21:40 -07:00
|
|
|
if (ret)
|
2022-09-21 23:24:14 -07:00
|
|
|
return ret;
|
2022-04-25 05:21:40 -07:00
|
|
|
|
2022-09-21 23:24:14 -07:00
|
|
|
if (map.m_flags & EROFS_MAP_META) {
|
|
|
|
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct iov_iter iter;
|
erofs: don't align offset for erofs_read_metabuf() (simple cases)
Most of the callers of erofs_read_metabuf() have the following form:
block = erofs_blknr(sb, offset);
off = erofs_blkoff(sb, offset);
p = erofs_read_metabuf(...., erofs_pos(sb, block), ...);
if (IS_ERR(p))
return PTR_ERR(p);
q = p + off;
// no further uses of p, block or off.
The value passed to erofs_read_metabuf() is offset rounded down to block
size, i.e. offset - off. Passing offset as-is would increase the return
value by off in case of success and keep the return value unchanged
in case of error. In other words, the same could be achieved by
q = erofs_read_metabuf(...., offset, ...);
if (IS_ERR(q))
return PTR_ERR(q);
This commit converts these simple cases.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Link: https://lore.kernel.org/r/20240425195915.GD1031757@ZenIV
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-04-25 12:59:15 -07:00
|
|
|
size_t size = map.m_llen;
|
2022-09-21 23:24:14 -07:00
|
|
|
void *src;
|
|
|
|
|
erofs: don't align offset for erofs_read_metabuf() (simple cases)
Most of the callers of erofs_read_metabuf() have the following form:
block = erofs_blknr(sb, offset);
off = erofs_blkoff(sb, offset);
p = erofs_read_metabuf(...., erofs_pos(sb, block), ...);
if (IS_ERR(p))
return PTR_ERR(p);
q = p + off;
// no further uses of p, block or off.
The value passed to erofs_read_metabuf() is offset rounded down to block
size, i.e. offset - off. Passing offset as-is would increase the return
value by off in case of success and keep the return value unchanged
in case of error. In other words, the same could be achieved by
q = erofs_read_metabuf(...., offset, ...);
if (IS_ERR(q))
return PTR_ERR(q);
This commit converts these simple cases.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Link: https://lore.kernel.org/r/20240425195915.GD1031757@ZenIV
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-04-25 12:59:15 -07:00
|
|
|
src = erofs_read_metabuf(&buf, sb, map.m_pa, EROFS_KMAP);
|
2022-09-21 23:24:14 -07:00
|
|
|
if (IS_ERR(src))
|
|
|
|
return PTR_ERR(src);
|
|
|
|
|
2022-09-15 17:25:47 -07:00
|
|
|
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
|
erofs: don't align offset for erofs_read_metabuf() (simple cases)
Most of the callers of erofs_read_metabuf() have the following form:
block = erofs_blknr(sb, offset);
off = erofs_blkoff(sb, offset);
p = erofs_read_metabuf(...., erofs_pos(sb, block), ...);
if (IS_ERR(p))
return PTR_ERR(p);
q = p + off;
// no further uses of p, block or off.
The value passed to erofs_read_metabuf() is offset rounded down to block
size, i.e. offset - off. Passing offset as-is would increase the return
value by off in case of success and keep the return value unchanged in
in case of error. In other words, the same could be achieved by
q = erofs_read_metabuf(...., offset, ...);
if (IS_ERR(q))
return PTR_ERR(q);
This commit convert these simple cases.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Link: https://lore.kernel.org/r/20240425195915.GD1031757@ZenIV
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-04-25 12:59:15 -07:00
|
|
|
if (copy_to_iter(src, size, &iter) != size) {
|
2022-11-03 22:40:27 -07:00
|
|
|
erofs_put_metabuf(&buf);
|
2022-09-21 23:24:14 -07:00
|
|
|
return -EFAULT;
|
2022-11-03 22:40:27 -07:00
|
|
|
}
|
2022-09-21 23:24:14 -07:00
|
|
|
iov_iter_zero(PAGE_SIZE - size, &iter);
|
|
|
|
erofs_put_metabuf(&buf);
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
req->submitted += PAGE_SIZE;
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
return 0;
|
2022-04-25 05:21:40 -07:00
|
|
|
}
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
count = req->len - req->submitted;
|
2022-09-21 23:24:14 -07:00
|
|
|
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct iov_iter iter;
|
|
|
|
|
2022-09-15 17:25:47 -07:00
|
|
|
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
|
2022-09-21 23:24:14 -07:00
|
|
|
iov_iter_zero(count, &iter);
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
req->submitted += count;
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
return 0;
|
2022-04-25 05:21:41 -07:00
|
|
|
}
|
|
|
|
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
count = min_t(size_t, map.m_llen - (pos - map.m_la), count);
|
2022-11-03 22:40:28 -07:00
|
|
|
DBG_BUGON(!count || count % PAGE_SIZE);
|
|
|
|
|
2022-04-25 05:21:40 -07:00
|
|
|
mdev = (struct erofs_map_dev) {
|
|
|
|
.m_deviceid = map.m_deviceid,
|
|
|
|
.m_pa = map.m_pa,
|
|
|
|
};
|
|
|
|
ret = erofs_map_dev(sb, &mdev);
|
|
|
|
if (ret)
|
2022-09-21 23:24:14 -07:00
|
|
|
return ret;
|
2022-05-09 00:40:28 -07:00
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
io = erofs_fscache_req_io_alloc(req);
|
|
|
|
if (!io)
|
|
|
|
return -ENOMEM;
|
|
|
|
iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
|
|
|
|
ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie,
|
|
|
|
mdev.m_pa + (pos - map.m_la), io);
|
|
|
|
erofs_fscache_req_io_put(io);
|
2022-04-25 05:21:40 -07:00
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
req->submitted += count;
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
return ret;
|
2022-04-25 05:21:40 -07:00
|
|
|
}
|
|
|
|
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
static int erofs_fscache_data_read(struct erofs_fscache_rq *req)
|
2022-04-25 05:21:42 -07:00
|
|
|
{
|
2022-09-21 23:24:14 -07:00
|
|
|
int ret;
|
|
|
|
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
do {
|
|
|
|
ret = erofs_fscache_data_read_slice(req);
|
|
|
|
if (ret)
|
|
|
|
req->error = ret;
|
|
|
|
} while (!ret && req->submitted < req->len);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
|
|
|
|
{
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct erofs_fscache_rq *req;
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
int ret;
|
|
|
|
|
2024-01-15 07:46:35 -07:00
|
|
|
req = erofs_fscache_req_alloc(folio->mapping,
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
folio_pos(folio), folio_size(folio));
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
if (!req) {
|
2022-09-21 23:24:14 -07:00
|
|
|
folio_unlock(folio);
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
return -ENOMEM;
|
2022-04-25 05:21:42 -07:00
|
|
|
}
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
|
|
|
|
ret = erofs_fscache_data_read(req);
|
|
|
|
erofs_fscache_req_put(req);
|
|
|
|
return ret;
|
2022-04-25 05:21:42 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void erofs_fscache_readahead(struct readahead_control *rac)
|
|
|
|
{
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
struct erofs_fscache_rq *req;
|
2022-04-25 05:21:42 -07:00
|
|
|
|
|
|
|
if (!readahead_count(rac))
|
|
|
|
return;
|
|
|
|
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
req = erofs_fscache_req_alloc(rac->mapping,
|
|
|
|
readahead_pos(rac), readahead_length(rac));
|
erofs: make iov_iter describe target buffers over fscache
So far the fscache mode supports uncompressed data only, and the data
read from fscache is put directly into the target page cache. As the
support for compressed data in fscache mode is going to be introduced,
rework the fscache internals so that the following compressed part
could make the raw data read from fscache be directed to the target
buffer it wants, decompress the raw data, and finally fill the page
cache with the decompressed data.
As the first step, a new structure, i.e. erofs_fscache_io (io), is
introduced to describe a generic read request from the fscache, while
the caller can specify the target buffer it wants in the iov_iter
structure (io->iter). Besides, the caller can also specify its
completion callback and private data through erofs_fscache_io, which
will be called to make further handling, e.g. unlocking the page cache
for uncompressed data or decompressing the read raw data, when the read
request from the fscache completes. Now erofs_fscache_read_io_async()
serves as a generic interface for reading raw data from fscache for both
compressed and uncompressed data.
The erofs_fscache_rq structure is kept to describe a request to fill the
page cache in the specified range.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-08 02:41:58 -07:00
|
|
|
if (!req)
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
return;
|
2022-04-25 05:21:42 -07:00
|
|
|
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
/* The request completion will drop refs on the folios. */
|
|
|
|
while (readahead_folio(rac))
|
|
|
|
;
|
2022-04-25 05:21:42 -07:00
|
|
|
|
erofs: support large folios for fscache mode
When large folios supported, one folio can be split into several slices,
each of which may be mapped to META/UNMAPPED/MAPPED, and the folio can
be unlocked as a whole only when all slices have completed.
Thus always allocate erofs_fscache_request for each .read_folio() or
.readahead(), in which case the allocated request is responsible for
unlocking folios when all slices have completed.
As described above, each folio or folio range can be mapped into several
slices, while these slices may be mapped to different cookies, and thus
each slice needs its own netfs_cache_resources. Here we introduce
chained requests to support this, where each .read_folio() or
.readahead() calling can correspond to multiple requests. Each request
has its own netfs_cache_resources and thus is used to access one cookie.
Among these requests, there's a primary request, with the others
pointing to the primary request.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221201074256.16639-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2022-12-01 00:42:55 -07:00
|
|
|
erofs_fscache_data_read(req);
|
|
|
|
erofs_fscache_req_put(req);
|
2022-04-25 05:21:42 -07:00
|
|
|
}
|
|
|
|
|
2022-04-25 05:21:35 -07:00
|
|
|
static const struct address_space_operations erofs_fscache_meta_aops = {
|
2022-05-24 19:55:07 -07:00
|
|
|
.read_folio = erofs_fscache_meta_read_folio,
|
2022-04-25 05:21:35 -07:00
|
|
|
};
|
|
|
|
|
2022-04-25 05:21:40 -07:00
|
|
|
const struct address_space_operations erofs_fscache_access_aops = {
|
2022-05-24 19:55:07 -07:00
|
|
|
.read_folio = erofs_fscache_read_folio,
|
2022-04-25 05:21:42 -07:00
|
|
|
.readahead = erofs_fscache_readahead,
|
2022-04-25 05:21:40 -07:00
|
|
|
};
|
|
|
|
|
2022-09-17 21:34:53 -07:00
|
|
|
static void erofs_fscache_domain_put(struct erofs_domain *domain)
|
|
|
|
{
|
|
|
|
mutex_lock(&erofs_domain_list_lock);
|
|
|
|
if (refcount_dec_and_test(&domain->ref)) {
|
|
|
|
list_del(&domain->list);
|
2022-09-17 21:34:54 -07:00
|
|
|
if (list_empty(&erofs_domain_list)) {
|
|
|
|
kern_unmount(erofs_pseudo_mnt);
|
|
|
|
erofs_pseudo_mnt = NULL;
|
|
|
|
}
|
2022-09-17 21:34:53 -07:00
|
|
|
fscache_relinquish_volume(domain->volume, NULL, false);
|
2023-02-08 23:39:12 -07:00
|
|
|
mutex_unlock(&erofs_domain_list_lock);
|
2022-09-17 21:34:53 -07:00
|
|
|
kfree(domain->domain_id);
|
|
|
|
kfree(domain);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
mutex_unlock(&erofs_domain_list_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int erofs_fscache_register_volume(struct super_block *sb)
|
|
|
|
{
|
|
|
|
struct erofs_sb_info *sbi = EROFS_SB(sb);
|
2022-10-20 19:31:53 -07:00
|
|
|
char *domain_id = sbi->domain_id;
|
2022-09-17 21:34:53 -07:00
|
|
|
struct fscache_volume *volume;
|
|
|
|
char *name;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
name = kasprintf(GFP_KERNEL, "erofs,%s",
|
2022-10-20 19:31:53 -07:00
|
|
|
domain_id ? domain_id : sbi->fsid);
|
2022-09-17 21:34:53 -07:00
|
|
|
if (!name)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
volume = fscache_acquire_volume(name, NULL, NULL, 0);
|
|
|
|
if (IS_ERR_OR_NULL(volume)) {
|
|
|
|
erofs_err(sb, "failed to register volume for %s", name);
|
|
|
|
ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
|
|
|
|
volume = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
sbi->volume = volume;
|
|
|
|
kfree(name);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int erofs_fscache_init_domain(struct super_block *sb)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct erofs_domain *domain;
|
|
|
|
struct erofs_sb_info *sbi = EROFS_SB(sb);
|
|
|
|
|
|
|
|
domain = kzalloc(sizeof(struct erofs_domain), GFP_KERNEL);
|
|
|
|
if (!domain)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2022-10-20 19:31:53 -07:00
|
|
|
domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
|
2022-09-17 21:34:53 -07:00
|
|
|
if (!domain->domain_id) {
|
|
|
|
kfree(domain);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = erofs_fscache_register_volume(sb);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
2022-09-17 21:34:54 -07:00
|
|
|
if (!erofs_pseudo_mnt) {
|
erofs: fix lockdep false positives on initializing erofs_pseudo_mnt
Lockdep reported the following issue when mounting erofs with a domain_id:
============================================
WARNING: possible recursive locking detected
6.8.0-rc7-xfstests #521 Not tainted
--------------------------------------------
mount/396 is trying to acquire lock:
ffff907a8aaaa0e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
but task is already holding lock:
ffff907a8aaa90e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&type->s_umount_key#50/1);
lock(&type->s_umount_key#50/1);
*** DEADLOCK ***
May be due to missing lock nesting notation
2 locks held by mount/396:
#0: ffff907a8aaa90e0 (&type->s_umount_key#50/1){+.+.}-{3:3},
at: alloc_super+0xe3/0x3d0
#1: ffffffffc00e6f28 (erofs_domain_list_lock){+.+.}-{3:3},
at: erofs_fscache_register_fs+0x3d/0x270 [erofs]
stack backtrace:
CPU: 1 PID: 396 Comm: mount Not tainted 6.8.0-rc7-xfstests #521
Call Trace:
<TASK>
dump_stack_lvl+0x64/0xb0
validate_chain+0x5c4/0xa00
__lock_acquire+0x6a9/0xd50
lock_acquire+0xcd/0x2b0
down_write_nested+0x45/0xd0
alloc_super+0xe3/0x3d0
sget_fc+0x62/0x2f0
vfs_get_super+0x21/0x90
vfs_get_tree+0x2c/0xf0
fc_mount+0x12/0x40
vfs_kern_mount.part.0+0x75/0x90
kern_mount+0x24/0x40
erofs_fscache_register_fs+0x1ef/0x270 [erofs]
erofs_fc_fill_super+0x213/0x380 [erofs]
This is because the file_system_type of both erofs and the pseudo-mount
point of domain_id is erofs_fs_type, so two successive calls to
alloc_super() are considered to be using the same lock and trigger the
warning above.
Therefore add a nodev file_system_type called erofs_anon_fs_type in
fscache.c to silence this complaint. Because kern_mount() takes a
pointer to struct file_system_type, not its (string) name. So we don't
need to call register_filesystem(). In addition, call init_pseudo() in
erofs_anon_init_fs_context() as suggested by Al Viro, so that we can
remove erofs_fc_fill_pseudo_super(), erofs_fc_anon_get_tree(), and
erofs_anon_context_ops.
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Fixes: a9849560c55e ("erofs: introduce a pseudo mnt to manage shared cookies")
Signed-off-by: Baokun Li <libaokun1@huawei.com>
Reviewed-and-tested-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Yang Erkun <yangerkun@huawei.com>
Link: https://lore.kernel.org/r/20240307101018.2021925-1-libaokun1@huawei.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
2024-03-07 03:10:18 -07:00
|
|
|
struct vfsmount *mnt = kern_mount(&erofs_anon_fs_type);
|
2024-02-12 20:44:11 -07:00
|
|
|
if (IS_ERR(mnt)) {
|
|
|
|
err = PTR_ERR(mnt);
|
2022-09-17 21:34:54 -07:00
|
|
|
goto out;
|
|
|
|
}
|
2024-02-12 20:44:11 -07:00
|
|
|
erofs_pseudo_mnt = mnt;
|
2022-09-17 21:34:54 -07:00
|
|
|
}
|
|
|
|
|
2022-09-17 21:34:53 -07:00
|
|
|
domain->volume = sbi->volume;
|
|
|
|
refcount_set(&domain->ref, 1);
|
|
|
|
list_add(&domain->list, &erofs_domain_list);
|
|
|
|
sbi->domain = domain;
|
|
|
|
return 0;
|
|
|
|
out:
|
|
|
|
kfree(domain->domain_id);
|
|
|
|
kfree(domain);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int erofs_fscache_register_domain(struct super_block *sb)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct erofs_domain *domain;
|
|
|
|
struct erofs_sb_info *sbi = EROFS_SB(sb);
|
|
|
|
|
|
|
|
mutex_lock(&erofs_domain_list_lock);
|
|
|
|
list_for_each_entry(domain, &erofs_domain_list, list) {
|
2022-10-20 19:31:53 -07:00
|
|
|
if (!strcmp(domain->domain_id, sbi->domain_id)) {
|
2022-09-17 21:34:53 -07:00
|
|
|
sbi->domain = domain;
|
|
|
|
sbi->volume = domain->volume;
|
|
|
|
refcount_inc(&domain->ref);
|
|
|
|
mutex_unlock(&erofs_domain_list_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
err = erofs_fscache_init_domain(sb);
|
|
|
|
mutex_unlock(&erofs_domain_list_lock);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2023-02-08 23:39:13 -07:00
|
|
|
static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
|
|
|
|
char *name, unsigned int flags)
|
2022-04-25 05:21:34 -07:00
|
|
|
{
|
|
|
|
struct fscache_volume *volume = EROFS_SB(sb)->volume;
|
|
|
|
struct erofs_fscache *ctx;
|
|
|
|
struct fscache_cookie *cookie;
|
2023-02-08 23:39:13 -07:00
|
|
|
struct super_block *isb;
|
|
|
|
struct inode *inode;
|
2022-04-25 05:21:35 -07:00
|
|
|
int ret;
|
2022-04-25 05:21:34 -07:00
|
|
|
|
|
|
|
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
|
|
|
|
if (!ctx)
|
2022-09-17 21:34:52 -07:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2023-02-08 23:39:11 -07:00
|
|
|
INIT_LIST_HEAD(&ctx->node);
|
|
|
|
refcount_set(&ctx->ref, 1);
|
2022-04-25 05:21:34 -07:00
|
|
|
|
|
|
|
cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
|
|
|
|
name, strlen(name), NULL, 0, 0);
|
|
|
|
if (!cookie) {
|
|
|
|
erofs_err(sb, "failed to get cookie for %s", name);
|
2022-04-25 05:21:35 -07:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
2022-04-25 05:21:34 -07:00
|
|
|
}
|
|
|
|
fscache_use_cookie(cookie, false);
|
2022-04-25 05:21:35 -07:00
|
|
|
|
2023-02-08 23:39:13 -07:00
|
|
|
/*
|
|
|
|
* Allocate anonymous inode in global pseudo mount for shareable blobs,
|
|
|
|
* so that they are accessible among erofs fs instances.
|
|
|
|
*/
|
|
|
|
isb = flags & EROFS_REG_COOKIE_SHARE ? erofs_pseudo_mnt->mnt_sb : sb;
|
|
|
|
inode = new_inode(isb);
|
|
|
|
if (!inode) {
|
|
|
|
erofs_err(sb, "failed to get anon inode for %s", name);
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err_cookie;
|
2022-04-25 05:21:35 -07:00
|
|
|
}
|
|
|
|
|
2023-02-08 23:39:13 -07:00
|
|
|
inode->i_size = OFFSET_MAX;
|
|
|
|
inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
|
2024-01-23 20:19:45 -07:00
|
|
|
mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
|
2023-03-13 06:53:08 -07:00
|
|
|
inode->i_blkbits = EROFS_SB(sb)->blkszbits;
|
2023-02-08 23:39:13 -07:00
|
|
|
inode->i_private = ctx;
|
|
|
|
|
|
|
|
ctx->cookie = cookie;
|
|
|
|
ctx->inode = inode;
|
2022-09-17 21:34:52 -07:00
|
|
|
return ctx;
|
2022-04-25 05:21:35 -07:00
|
|
|
|
|
|
|
err_cookie:
|
2023-02-08 23:39:13 -07:00
|
|
|
fscache_unuse_cookie(cookie, NULL, NULL);
|
|
|
|
fscache_relinquish_cookie(cookie, false);
|
2022-04-25 05:21:35 -07:00
|
|
|
err:
|
|
|
|
kfree(ctx);
|
2022-09-17 21:34:52 -07:00
|
|
|
return ERR_PTR(ret);
|
2022-04-25 05:21:34 -07:00
|
|
|
}
|
|
|
|
|
2022-09-18 04:01:50 -07:00
|
|
|
static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
|
2022-04-25 05:21:34 -07:00
|
|
|
{
|
|
|
|
fscache_unuse_cookie(ctx->cookie, NULL, NULL);
|
|
|
|
fscache_relinquish_cookie(ctx->cookie, false);
|
2022-04-25 05:21:35 -07:00
|
|
|
iput(ctx->inode);
|
2022-09-18 04:01:50 -07:00
|
|
|
kfree(ctx->name);
|
2022-04-25 05:21:34 -07:00
|
|
|
kfree(ctx);
|
|
|
|
}
|
|
|
|
|
2023-02-08 23:39:13 -07:00
|
|
|
static struct erofs_fscache *erofs_domain_init_cookie(struct super_block *sb,
|
|
|
|
char *name, unsigned int flags)
|
2022-09-18 04:01:50 -07:00
|
|
|
{
|
|
|
|
struct erofs_fscache *ctx;
|
|
|
|
struct erofs_domain *domain = EROFS_SB(sb)->domain;
|
|
|
|
|
2022-11-25 04:08:22 -07:00
|
|
|
ctx = erofs_fscache_acquire_cookie(sb, name, flags);
|
2022-09-18 04:01:50 -07:00
|
|
|
if (IS_ERR(ctx))
|
|
|
|
return ctx;
|
|
|
|
|
|
|
|
ctx->name = kstrdup(name, GFP_KERNEL);
|
|
|
|
if (!ctx->name) {
|
2023-02-08 23:39:13 -07:00
|
|
|
erofs_fscache_relinquish_cookie(ctx);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2022-09-18 04:01:50 -07:00
|
|
|
}
|
|
|
|
|
2023-02-08 23:39:13 -07:00
|
|
|
refcount_inc(&domain->ref);
|
2022-09-18 04:01:50 -07:00
|
|
|
ctx->domain = domain;
|
2023-02-08 23:39:11 -07:00
|
|
|
list_add(&ctx->node, &erofs_domain_cookies_list);
|
2022-09-18 04:01:50 -07:00
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
|
2023-02-08 23:39:13 -07:00
|
|
|
static struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
|
|
|
|
char *name, unsigned int flags)
|
2022-09-18 04:01:50 -07:00
|
|
|
{
|
|
|
|
struct erofs_fscache *ctx;
|
|
|
|
struct erofs_domain *domain = EROFS_SB(sb)->domain;
|
|
|
|
|
2023-02-08 23:39:13 -07:00
|
|
|
flags |= EROFS_REG_COOKIE_SHARE;
|
2022-09-18 04:01:50 -07:00
|
|
|
mutex_lock(&erofs_domain_cookies_lock);
|
2023-02-08 23:39:11 -07:00
|
|
|
list_for_each_entry(ctx, &erofs_domain_cookies_list, node) {
|
|
|
|
if (ctx->domain != domain || strcmp(ctx->name, name))
|
2022-09-18 04:01:50 -07:00
|
|
|
continue;
|
2022-11-25 04:08:22 -07:00
|
|
|
if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
|
2023-02-08 23:39:11 -07:00
|
|
|
refcount_inc(&ctx->ref);
|
2022-11-25 04:08:22 -07:00
|
|
|
} else {
|
|
|
|
erofs_err(sb, "%s already exists in domain %s", name,
|
|
|
|
domain->domain_id);
|
|
|
|
ctx = ERR_PTR(-EEXIST);
|
|
|
|
}
|
2022-09-18 04:01:50 -07:00
|
|
|
mutex_unlock(&erofs_domain_cookies_lock);
|
|
|
|
return ctx;
|
|
|
|
}
|
2023-02-08 23:39:13 -07:00
|
|
|
ctx = erofs_domain_init_cookie(sb, name, flags);
|
2022-09-18 04:01:50 -07:00
|
|
|
mutex_unlock(&erofs_domain_cookies_lock);
|
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
|
2022-11-25 04:08:22 -07:00
|
|
|
char *name,
|
|
|
|
unsigned int flags)
|
2022-09-18 04:01:50 -07:00
|
|
|
{
|
2022-10-20 19:31:53 -07:00
|
|
|
if (EROFS_SB(sb)->domain_id)
|
2022-11-25 04:08:22 -07:00
|
|
|
return erofs_domain_register_cookie(sb, name, flags);
|
|
|
|
return erofs_fscache_acquire_cookie(sb, name, flags);
|
2022-09-18 04:01:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
|
|
|
|
{
|
2023-02-08 23:39:11 -07:00
|
|
|
struct erofs_domain *domain = NULL;
|
2022-09-18 04:01:50 -07:00
|
|
|
|
|
|
|
if (!ctx)
|
|
|
|
return;
|
2023-02-08 23:39:11 -07:00
|
|
|
if (!ctx->domain)
|
|
|
|
return erofs_fscache_relinquish_cookie(ctx);
|
2022-09-18 04:01:50 -07:00
|
|
|
|
2023-02-08 23:39:11 -07:00
|
|
|
mutex_lock(&erofs_domain_cookies_lock);
|
|
|
|
if (refcount_dec_and_test(&ctx->ref)) {
|
|
|
|
domain = ctx->domain;
|
|
|
|
list_del(&ctx->node);
|
|
|
|
erofs_fscache_relinquish_cookie(ctx);
|
|
|
|
}
|
|
|
|
mutex_unlock(&erofs_domain_cookies_lock);
|
|
|
|
if (domain)
|
|
|
|
erofs_fscache_domain_put(domain);
|
2022-09-18 04:01:50 -07:00
|
|
|
}
|
|
|
|
|
2022-04-25 05:21:33 -07:00
|
|
|
int erofs_fscache_register_fs(struct super_block *sb)
|
|
|
|
{
|
2022-09-17 21:34:53 -07:00
|
|
|
int ret;
|
2022-04-25 05:21:33 -07:00
|
|
|
struct erofs_sb_info *sbi = EROFS_SB(sb);
|
2022-09-17 21:34:52 -07:00
|
|
|
struct erofs_fscache *fscache;
|
2023-02-08 23:39:13 -07:00
|
|
|
unsigned int flags = 0;
|
2022-04-25 05:21:33 -07:00
|
|
|
|
2022-10-20 19:31:53 -07:00
|
|
|
if (sbi->domain_id)
|
2022-09-17 21:34:53 -07:00
|
|
|
ret = erofs_fscache_register_domain(sb);
|
|
|
|
else
|
|
|
|
ret = erofs_fscache_register_volume(sb);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2022-09-17 21:34:52 -07:00
|
|
|
|
2022-11-25 04:08:22 -07:00
|
|
|
/*
|
|
|
|
* When shared domain is enabled, using NEED_NOEXIST to guarantee
|
|
|
|
* the primary data blob (aka fsid) is unique in the shared domain.
|
|
|
|
*
|
|
|
|
* For non-shared-domain case, fscache_acquire_volume() invoked by
|
|
|
|
* erofs_fscache_register_volume() has already guaranteed
|
|
|
|
* the uniqueness of primary data blob.
|
|
|
|
*
|
|
|
|
* Acquired domain/volume will be relinquished in kill_sb() on error.
|
|
|
|
*/
|
|
|
|
if (sbi->domain_id)
|
|
|
|
flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
|
|
|
|
fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
|
2022-09-17 21:34:52 -07:00
|
|
|
if (IS_ERR(fscache))
|
|
|
|
return PTR_ERR(fscache);
|
|
|
|
|
|
|
|
sbi->s_fscache = fscache;
|
|
|
|
return 0;
|
2022-04-25 05:21:33 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void erofs_fscache_unregister_fs(struct super_block *sb)
|
|
|
|
{
|
|
|
|
struct erofs_sb_info *sbi = EROFS_SB(sb);
|
|
|
|
|
2022-09-17 21:34:52 -07:00
|
|
|
erofs_fscache_unregister_cookie(sbi->s_fscache);
|
2022-09-17 21:34:53 -07:00
|
|
|
|
|
|
|
if (sbi->domain)
|
|
|
|
erofs_fscache_domain_put(sbi->domain);
|
|
|
|
else
|
|
|
|
fscache_relinquish_volume(sbi->volume, NULL, false);
|
|
|
|
|
2022-09-17 21:34:52 -07:00
|
|
|
sbi->s_fscache = NULL;
|
2022-04-25 05:21:33 -07:00
|
|
|
sbi->volume = NULL;
|
2022-09-17 21:34:53 -07:00
|
|
|
sbi->domain = NULL;
|
2022-04-25 05:21:33 -07:00
|
|
|
}
|