
erofs: support compressed inodes over fscache

Since fscache can use an iov_iter to write into destination buffers, a
bio_vec array can be used to describe those buffers too.

To keep this simple, pseudo bios (without a bdev) are prepared and their
bio_vecs are filled with bio_add_page(); a common .bi_end_io is then
called directly to handle I/O completions.

Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240308094159.40547-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
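
The core trick, condensed as a sketch (kernel-style C; not part of the
patch, and demo_fill_iter() is a hypothetical name): a bdev-less bio
serves purely as a bio_vec container, and iov_iter_bvec() then re-exposes
the filled vector as a destination iterator that fscache can write into.

#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/uio.h>

/* Hypothetical sketch: fill bvecs[] through a pseudo bio, then build an
 * ITER_DEST iov_iter over them, as erofs_fscache_submit_bio() does below. */
static void demo_fill_iter(struct page *page, unsigned int len)
{
	struct bio_vec bvecs[4];	/* small on-stack table for the demo */
	struct bio bio;
	struct iov_iter iter;

	/* NULL bdev: this bio is never submitted to a block device; it
	 * only exists so bio_add_page() can fill bvecs[]. */
	bio_init(&bio, NULL, bvecs, ARRAY_SIZE(bvecs), REQ_OP_READ);
	bio_add_page(&bio, page, len, 0);

	/* Re-expose the filled vector as a write destination for fscache. */
	iov_iter_bvec(&iter, ITER_DEST, bvecs, bio.bi_vcnt,
		      bio.bi_iter.bi_size);
	/* ... hand &iter to the asynchronous fscache read machinery ... */
}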
Author:    Jingbo Xu <jefflexu@linux.alibaba.com>
Date:      2024-03-08 17:41:59 +08:00
Committer: Gao Xiang
Commit:    a1bafc3109 (parent: f2151df574)

 4 files changed, 77 insertions(+), 20 deletions(-)
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -174,6 +174,53 @@ static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
 	return 0;
 }
 
+struct erofs_fscache_bio {
+	struct erofs_fscache_io io;
+	struct bio bio;		/* w/o bdev to share bio_add_page/endio() */
+	struct bio_vec bvecs[BIO_MAX_VECS];
+};
+
+static void erofs_fscache_bio_endio(void *priv,
+		ssize_t transferred_or_error, bool was_async)
+{
+	struct erofs_fscache_bio *io = priv;
+
+	if (IS_ERR_VALUE(transferred_or_error))
+		io->bio.bi_status = errno_to_blk_status(transferred_or_error);
+	io->bio.bi_end_io(&io->bio);
+	BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0);
+	erofs_fscache_io_put(&io->io);
+}
+
+struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
+{
+	struct erofs_fscache_bio *io;
+
+	io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
+	bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
+	io->io.private = mdev->m_fscache->cookie;
+	io->io.end_io = erofs_fscache_bio_endio;
+	refcount_set(&io->io.ref, 1);
+	return &io->bio;
+}
+
+void erofs_fscache_submit_bio(struct bio *bio)
+{
+	struct erofs_fscache_bio *io = container_of(bio,
+			struct erofs_fscache_bio, bio);
+	int ret;
+
+	iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt,
+		      bio->bi_iter.bi_size);
+	ret = erofs_fscache_read_io_async(io->io.private,
+			bio->bi_iter.bi_sector << 9, &io->io);
+	erofs_fscache_io_put(&io->io);
+	if (!ret)
+		return;
+	bio->bi_status = errno_to_blk_status(ret);
+	bio->bi_end_io(bio);
+}
+
 static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
 {
 	struct erofs_fscache *ctx = folio->mapping->host->i_private;
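
A quick usage illustration of how these two helpers pair up (hypothetical
demo_* caller, not in the patch; the real driver is the
z_erofs_submit_queue() loop further below):

/* Hypothetical completion handler; erofs_fscache_bio_endio() above has
 * already translated any error into bio->bi_status before calling it. */
static void demo_bio_done(struct bio *bio)
{
	pr_info("fscache read: %d\n", blk_status_to_errno(bio->bi_status));
}

/* Hypothetical caller: read one encoded page at physical address pos. */
static void demo_submit_one(struct erofs_map_dev *mdev, struct page *page,
			    erofs_off_t pos)
{
	struct bio *bio = erofs_fscache_bio_alloc(mdev);	/* io.ref == 1 */

	bio->bi_end_io = demo_bio_done;
	bio->bi_iter.bi_sector = pos >> 9;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	erofs_fscache_submit_bio(bio);	/* builds the iov_iter, starts I/O */
}

Note the reference pairing: erofs_fscache_bio_alloc() starts io.ref at 1
for the submitter, erofs_fscache_submit_bio() drops that reference once
erofs_fscache_read_io_async() has (presumably) taken its own for the
in-flight request, and erofs_fscache_bio_endio() releases the final
reference after forwarding completion to ->bi_end_io().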

--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -259,14 +259,12 @@ static int erofs_fill_inode(struct inode *inode)
 
 	if (erofs_inode_is_data_compressed(vi->datalayout)) {
 #ifdef CONFIG_EROFS_FS_ZIP
-		if (!erofs_is_fscache_mode(inode->i_sb)) {
-			DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
-					erofs_info, inode->i_sb,
-					"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
-			inode->i_mapping->a_ops = &z_erofs_aops;
-			err = 0;
-			goto out_unlock;
-		}
+		DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT,
+				erofs_info, inode->i_sb,
+				"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
+		inode->i_mapping->a_ops = &z_erofs_aops;
+		err = 0;
+		goto out_unlock;
 #endif
 		err = -EOPNOTSUPP;
 		goto out_unlock;

--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -512,6 +512,8 @@ void erofs_fscache_unregister_fs(struct super_block *sb);
 struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
 					char *name, unsigned int flags);
 void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
+struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev);
+void erofs_fscache_submit_bio(struct bio *bio);
 #else
 static inline int erofs_fscache_register_fs(struct super_block *sb)
 {
@@ -529,6 +531,8 @@ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
 static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache)
 {
 }
+static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
+static inline void erofs_fscache_submit_bio(struct bio *bio) {}
 #endif
 
 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1561,7 +1561,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static void z_erofs_submissionqueue_endio(struct bio *bio)
+static void z_erofs_endio(struct bio *bio)
 {
 	struct z_erofs_decompressqueue *q = bio->bi_private;
 	blk_status_t err = bio->bi_status;
@@ -1582,7 +1582,8 @@ static void z_erofs_submissionqueue_endio(struct bio *bio)
 	if (err)
 		q->eio = true;
 	z_erofs_decompress_kickoff(q, -1);
-	bio_put(bio);
+	if (bio->bi_bdev)
+		bio_put(bio);
 }
 
 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
@@ -1596,7 +1597,6 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 	z_erofs_next_pcluster_t owned_head = f->owned_head;
 	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
 	erofs_off_t last_pa;
-	struct block_device *last_bdev;
 	unsigned int nr_bios = 0;
 	struct bio *bio = NULL;
 	unsigned long pflags;
@@ -1643,9 +1643,13 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 			continue;
 
 		if (bio && (cur != last_pa ||
-			    last_bdev != mdev.m_bdev)) {
-submit_bio_retry:
-			submit_bio(bio);
+			    bio->bi_bdev != mdev.m_bdev)) {
+io_retry:
+			if (!erofs_is_fscache_mode(sb))
+				submit_bio(bio);
+			else
+				erofs_fscache_submit_bio(bio);
+
 			if (memstall) {
 				psi_memstall_leave(&pflags);
 				memstall = 0;
@@ -1660,15 +1664,16 @@ submit_bio_retry:
 		}
 
 		if (!bio) {
-			bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
-					REQ_OP_READ, GFP_NOIO);
-			bio->bi_end_io = z_erofs_submissionqueue_endio;
+			bio = erofs_is_fscache_mode(sb) ?
+				erofs_fscache_bio_alloc(&mdev) :
+				bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
+					  REQ_OP_READ, GFP_NOIO);
+			bio->bi_end_io = z_erofs_endio;
 			bio->bi_iter.bi_sector = cur >> 9;
 			bio->bi_private = q[JQ_SUBMIT];
 			if (readahead)
 				bio->bi_opf |= REQ_RAHEAD;
 			++nr_bios;
-			last_bdev = mdev.m_bdev;
 		}
 
 		if (cur + bvec.bv_len > end)
@@ -1676,7 +1681,7 @@ submit_bio_retry:
 		DBG_BUGON(bvec.bv_len < sb->s_blocksize);
 		if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
 				  bvec.bv_offset))
-			goto submit_bio_retry;
+			goto io_retry;
 
 		last_pa = cur + bvec.bv_len;
 		bypass = false;
@@ -1689,7 +1694,10 @@ submit_bio_retry:
 	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);
 
 	if (bio) {
-		submit_bio(bio);
+		if (!erofs_is_fscache_mode(sb))
+			submit_bio(bio);
+		else
+			erofs_fscache_submit_bio(bio);
 		if (memstall)
 			psi_memstall_leave(&pflags);
 	}
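
Note that z_erofs_endio() above skips bio_put() for the bdev-less pseudo
bios: those are embedded in a kmalloc'ed struct erofs_fscache_bio and
their memory is released via erofs_fscache_io_put() when the last
reference goes away, whereas bios from bio_alloc() remain refcounted by
the block layer.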