
buffer: Convert block_write_end() to take a folio

All callers now have a folio, so pass it in instead of converting
from a folio to a page and back to a folio again.  Saves a call
to compound_head().

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Matthew Wilcox (Oracle) 2024-07-10 14:51:11 -04:00 committed by Christian Brauner
parent c4c9c89c8c
commit 97edbc02b2
11 changed files with 14 additions and 15 deletions
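
The caller-side pattern is the same in every file touched here: convert once at the boundary, then pass the folio through. Below is a minimal sketch of that pattern; the example_write_end() wrapper is hypothetical and not part of this commit, it simply mirrors the blkdev_write_end() hunk in the first file.

/*
 * Hypothetical ->write_end() implementation, sketched to show the new
 * calling convention.  A caller that is still handed a struct page
 * converts it once with page_folio(); block_write_end() no longer has
 * to re-derive the folio (i.e. call compound_head()) internally.
 */
static int example_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	int ret;

	ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}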

block/fops.c

@@ -462,7 +462,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
 {
 struct folio *folio = page_folio(page);
 int ret;
-ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
 folio_unlock(folio);
 folio_put(folio);

fs/buffer.c

@@ -2247,9 +2247,8 @@ EXPORT_SYMBOL(block_write_begin);
 int block_write_end(struct file *file, struct address_space *mapping,
 loff_t pos, unsigned len, unsigned copied,
-struct page *page, void *fsdata)
+struct folio *folio, void *fsdata)
 {
-struct folio *folio = page_folio(page);
 size_t start = pos - folio_pos(folio);
 if (unlikely(copied < len)) {
@@ -2288,7 +2287,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 loff_t old_size = inode->i_size;
 bool i_size_changed = false;
-copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
 /*
 * No need to use i_size_read() here, the i_size cannot change under us

fs/ext2/dir.c

@@ -87,7 +87,7 @@ static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 struct inode *dir = mapping->host;
 inode_inc_iversion(dir);
-block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 if (pos+len > dir->i_size) {
 i_size_write(dir, pos+len);

fs/ext4/inode.c

@@ -1315,7 +1315,7 @@ static int ext4_write_end(struct file *file,
 return ext4_write_inline_data_end(inode, pos, len, copied,
 folio);
-copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
 /*
 * it's important to update i_size while still holding folio lock:
 * page writeout could otherwise come in and zero beyond i_size.
@@ -3029,7 +3029,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
 * flag, which all that's needed to trigger page writeback.
 */
 copied = block_write_end(NULL, mapping, pos, len, copied,
-&folio->page, NULL);
+folio, NULL);
 new_i_size = pos + copied;
 /*

fs/iomap/buffered-io.c

@@ -900,7 +900,7 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 size_t bh_written;
 bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
-len, copied, &folio->page, NULL);
+len, copied, folio, NULL);
 WARN_ON_ONCE(bh_written != copied && bh_written != 0);
 return bh_written == copied;
 }

fs/minix/dir.c

@@ -45,7 +45,7 @@ static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 struct address_space *mapping = folio->mapping;
 struct inode *dir = mapping->host;
-block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 if (pos+len > dir->i_size) {
 i_size_write(dir, pos+len);

fs/nilfs2/dir.c

@@ -96,7 +96,7 @@ static void nilfs_commit_chunk(struct folio *folio,
 int err;
 nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
-copied = block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 if (pos + copied > dir->i_size)
 i_size_write(dir, pos + copied);
 if (IS_DIRSYNC(dir))

fs/nilfs2/recovery.c

@@ -533,7 +533,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
 goto failed_page;
 block_write_end(NULL, inode->i_mapping, pos, blocksize,
-blocksize, page, NULL);
+blocksize, folio, NULL);
 folio_unlock(folio);
 folio_put(folio);

fs/sysv/dir.c

@@ -33,7 +33,7 @@ static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 struct address_space *mapping = folio->mapping;
 struct inode *dir = mapping->host;
-block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 if (pos+len > dir->i_size) {
 i_size_write(dir, pos+len);
 mark_inode_dirty(dir);

fs/ufs/dir.c

@@ -48,7 +48,7 @@ static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 struct inode *dir = mapping->host;
 inode_inc_iversion(dir);
-block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 if (pos+len > dir->i_size) {
 i_size_write(dir, pos+len);
 mark_inode_dirty(dir);

include/linux/buffer_head.h

@@ -262,8 +262,8 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 get_block_t *get_block);
 int block_write_end(struct file *, struct address_space *,
-loff_t, unsigned, unsigned,
-struct page *, void *);
+loff_t, unsigned len, unsigned copied,
+struct folio *, void *);
 int generic_write_end(struct file *, struct address_space *,
 loff_t, unsigned, unsigned,
 struct page *, void *);