buffer: Convert block_write_end() to take a folio
All callers now have a folio, so pass it in instead of converting from a folio to a page and back to a folio again. Saves a call to compound_head().

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 97edbc02b2
parent c4c9c89c8c
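For context, the change swaps the struct page parameter of block_write_end() for a struct folio, so a caller that already holds a folio no longer passes &folio->page only for block_write_end() to call page_folio() (and thus compound_head()) to get the folio back. Below is a minimal sketch of a filesystem-style caller after the conversion; the function name my_fs_write_end and its surrounding code are illustrative only (modelled on the blkdev_write_end() hunk in this patch), not part of the patch itself:

/*
 * Illustrative caller, not from this patch.  Before this change the
 * same call site would have read:
 *
 *	copied = block_write_end(file, mapping, pos, len, copied,
 *				 &folio->page, fsdata);
 *
 * forcing block_write_end() to convert the page back into a folio.
 */
static int my_fs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct folio *folio, void *fsdata)
{
	/* Pass the folio straight through; no page_folio() round trip. */
	copied = block_write_end(file, mapping, pos, len, copied,
				 folio, fsdata);

	folio_unlock(folio);
	folio_put(folio);
	return copied;
}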
@@ -462,7 +462,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
 {
 	struct folio *folio = page_folio(page);
 	int ret;
-	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+	ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
 
 	folio_unlock(folio);
 	folio_put(folio);
@@ -2247,9 +2247,8 @@ EXPORT_SYMBOL(block_write_begin);
 
 int block_write_end(struct file *file, struct address_space *mapping,
 		loff_t pos, unsigned len, unsigned copied,
-		struct page *page, void *fsdata)
+		struct folio *folio, void *fsdata)
 {
-	struct folio *folio = page_folio(page);
 	size_t start = pos - folio_pos(folio);
 
 	if (unlikely(copied < len)) {
@@ -2288,7 +2287,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 	loff_t old_size = inode->i_size;
 	bool i_size_changed = false;
 
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+	copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
 
 	/*
 	 * No need to use i_size_read() here, the i_size cannot change under us
@@ -87,7 +87,7 @@ static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 	struct inode *dir = mapping->host;
 
 	inode_inc_iversion(dir);
-	block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 
 	if (pos+len > dir->i_size) {
 		i_size_write(dir, pos+len);
@@ -1315,7 +1315,7 @@ static int ext4_write_end(struct file *file,
 		return ext4_write_inline_data_end(inode, pos, len, copied,
 						  folio);
 
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+	copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
 	/*
 	 * it's important to update i_size while still holding folio lock:
 	 * page writeout could otherwise come in and zero beyond i_size.
@@ -3029,7 +3029,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
 	 * flag, which all that's needed to trigger page writeback.
 	 */
 	copied = block_write_end(NULL, mapping, pos, len, copied,
-				 &folio->page, NULL);
+				 folio, NULL);
 	new_i_size = pos + copied;
 
 	/*
@@ -900,7 +900,7 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		size_t bh_written;
 
 		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
-					len, copied, &folio->page, NULL);
+					len, copied, folio, NULL);
 		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
 		return bh_written == copied;
 	}
@@ -45,7 +45,7 @@ static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 	struct address_space *mapping = folio->mapping;
 	struct inode *dir = mapping->host;
 
-	block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 
 	if (pos+len > dir->i_size) {
 		i_size_write(dir, pos+len);
@@ -96,7 +96,7 @@ static void nilfs_commit_chunk(struct folio *folio,
 	int err;
 
 	nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
-	copied = block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+	copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 	if (pos + copied > dir->i_size)
 		i_size_write(dir, pos + copied);
 	if (IS_DIRSYNC(dir))
@@ -533,7 +533,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
 			goto failed_page;
 
 		block_write_end(NULL, inode->i_mapping, pos, blocksize,
-				blocksize, page, NULL);
+				blocksize, folio, NULL);
 
 		folio_unlock(folio);
 		folio_put(folio);
@@ -33,7 +33,7 @@ static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 	struct address_space *mapping = folio->mapping;
 	struct inode *dir = mapping->host;
 
-	block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 	if (pos+len > dir->i_size) {
 		i_size_write(dir, pos+len);
 		mark_inode_dirty(dir);
@@ -48,7 +48,7 @@ static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 	struct inode *dir = mapping->host;
 
 	inode_inc_iversion(dir);
-	block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 	if (pos+len > dir->i_size) {
 		i_size_write(dir, pos+len);
 		mark_inode_dirty(dir);
@@ -262,8 +262,8 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block);
 int block_write_end(struct file *, struct address_space *,
-				loff_t, unsigned, unsigned,
-				struct page *, void *);
+				loff_t, unsigned len, unsigned copied,
+				struct folio *, void *);
 int generic_write_end(struct file *, struct address_space *,
 				loff_t, unsigned, unsigned,
 				struct page *, void *);