mm: convert pagecache_isize_extended to use a folio
Remove four hidden calls to compound_head().  Also exit early if the
filesystem block size is >= PAGE_SIZE instead of just equal to PAGE_SIZE.

Link: https://lkml.kernel.org/r/20240405180038.2618624-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Parent: 55d134a7b4
Commit: 2ebe90dab9
@ -764,15 +764,15 @@ EXPORT_SYMBOL(truncate_setsize);
|
||||
* @from: original inode size
|
||||
* @to: new inode size
|
||||
*
|
||||
* Handle extension of inode size either caused by extending truncate or by
|
||||
* write starting after current i_size. We mark the page straddling current
|
||||
* i_size RO so that page_mkwrite() is called on the nearest write access to
|
||||
* the page. This way filesystem can be sure that page_mkwrite() is called on
|
||||
* the page before user writes to the page via mmap after the i_size has been
|
||||
* changed.
|
||||
* Handle extension of inode size either caused by extending truncate or
|
||||
* by write starting after current i_size. We mark the page straddling
|
||||
* current i_size RO so that page_mkwrite() is called on the first
|
||||
* write access to the page. The filesystem will update its per-block
|
||||
* information before user writes to the page via mmap after the i_size
|
||||
* has been changed.
|
||||
*
|
||||
* The function must be called after i_size is updated so that page fault
|
||||
* coming after we unlock the page will already see the new i_size.
|
||||
* coming after we unlock the folio will already see the new i_size.
|
||||
* The function must be called while we still hold i_rwsem - this not only
|
||||
* makes sure i_size is stable but also that userspace cannot observe new
|
||||
* i_size value before we are prepared to store mmap writes at new inode size.
|
||||
@ -781,31 +781,29 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
|
||||
{
|
||||
int bsize = i_blocksize(inode);
|
||||
loff_t rounded_from;
|
||||
struct page *page;
|
||||
pgoff_t index;
|
||||
struct folio *folio;
|
||||
|
||||
WARN_ON(to > inode->i_size);
|
||||
|
||||
if (from >= to || bsize == PAGE_SIZE)
|
||||
if (from >= to || bsize >= PAGE_SIZE)
|
||||
return;
|
||||
/* Page straddling @from will not have any hole block created? */
|
||||
rounded_from = round_up(from, bsize);
|
||||
if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
|
||||
return;
|
||||
|
||||
index = from >> PAGE_SHIFT;
|
||||
page = find_lock_page(inode->i_mapping, index);
|
||||
/* Page not cached? Nothing to do */
|
||||
if (!page)
|
||||
folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
|
||||
/* Folio not cached? Nothing to do */
|
||||
if (IS_ERR(folio))
|
||||
return;
|
||||
/*
|
||||
* See clear_page_dirty_for_io() for details why set_page_dirty()
|
||||
* See folio_clear_dirty_for_io() for details why folio_mark_dirty()
|
||||
* is needed.
|
||||
*/
|
||||
if (page_mkclean(page))
|
||||
set_page_dirty(page);
|
||||
unlock_page(page);
|
||||
put_page(page);
|
||||
if (folio_mkclean(folio))
|
||||
folio_mark_dirty(folio);
|
||||
folio_unlock(folio);
|
||||
folio_put(folio);
|
||||
}
|
||||
EXPORT_SYMBOL(pagecache_isize_extended);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user