ufs: add ufs_get_locked_folio and ufs_put_locked_folio

Convert the _page variants to call them.  Saves a few hidden calls to
compound_head().

Link: https://lkml.kernel.org/r/20231016201114.1928083-24-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Authored by Matthew Wilcox (Oracle) on 2023-10-16 21:11:10 +01:00; committed by Andrew Morton
parent 44f6857526
commit 5fb7bd50b3
2 changed files with 34 additions and 22 deletions
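
For illustration, this is roughly how a ufs-internal caller could use the new pair directly, once converted away from the page-based helpers (not part of this patch; the function name and error handling below are hypothetical and simplified):

/*
 * Illustrative sketch only, not from this patch: a hypothetical caller
 * working with the new folio helpers.
 */
static int ufs_example_touch_block(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = ufs_get_locked_folio(mapping, index);

	if (IS_ERR(folio))		/* read_mapping_folio() failed */
		return PTR_ERR(folio);
	if (!folio)			/* truncate got there first */
		return -ENOENT;

	/* ... operate on folio_buffers(folio) while the folio is locked ... */

	ufs_put_locked_folio(folio);	/* folio_unlock() + folio_put() */
	return 0;
}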

--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -229,43 +229,50 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
 	ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, fs32);
 }
 
+struct page *ufs_get_locked_page(struct address_space *mapping, pgoff_t index)
+{
+	struct folio *folio = ufs_get_locked_folio(mapping, index);
+
+	if (folio)
+		return folio_file_page(folio, index);
+	return NULL;
+}
+
 /**
- * ufs_get_locked_page() - locate, pin and lock a pagecache page, if not exist
+ * ufs_get_locked_folio() - locate, pin and lock a pagecache folio, if not exist
  * read it from disk.
  * @mapping: the address_space to search
  * @index: the page index
  *
- * Locates the desired pagecache page, if not exist we'll read it,
+ * Locates the desired pagecache folio, if not exist we'll read it,
  * locks it, increments its reference
  * count and returns its address.
  *
  */
-
-struct page *ufs_get_locked_page(struct address_space *mapping,
+struct folio *ufs_get_locked_folio(struct address_space *mapping,
 				 pgoff_t index)
 {
 	struct inode *inode = mapping->host;
-	struct page *page = find_lock_page(mapping, index);
-	if (!page) {
-		page = read_mapping_page(mapping, index, NULL);
+	struct folio *folio = filemap_lock_folio(mapping, index);
+	if (!folio) {
+		folio = read_mapping_folio(mapping, index, NULL);
 
-		if (IS_ERR(page)) {
-			printk(KERN_ERR "ufs_change_blocknr: "
-			       "read_mapping_page error: ino %lu, index: %lu\n",
+		if (IS_ERR(folio)) {
+			printk(KERN_ERR "ufs_change_blocknr: read_mapping_folio error: ino %lu, index: %lu\n",
 			       mapping->host->i_ino, index);
-			return page;
+			return folio;
 		}
 
-		lock_page(page);
+		folio_lock(folio);
 
-		if (unlikely(page->mapping == NULL)) {
+		if (unlikely(folio->mapping == NULL)) {
 			/* Truncate got there first */
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			return NULL;
 		}
 	}
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
-	return page;
+	if (!folio_buffers(folio))
+		folio_create_empty_buffers(folio, 1 << inode->i_blkbits, 0);
+	return folio;
 }
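
The old entry point survives as a thin wrapper: it takes the locked folio and resolves the specific page for @index with folio_file_page(). For reference, that helper (reproduced slightly simplified from include/linux/pagemap.h) just indexes into the folio; with the order-0 folios ufs uses today it returns the folio's single page:

/* For reference, simplified from include/linux/pagemap.h */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* Offset of the requested index within this folio. */
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}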

--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -273,12 +273,17 @@ extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struc
 extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);
 
 /* This functions works with cache pages*/
-extern struct page *ufs_get_locked_page(struct address_space *mapping,
-					pgoff_t index);
+struct page *ufs_get_locked_page(struct address_space *mapping, pgoff_t index);
+struct folio *ufs_get_locked_folio(struct address_space *mapping, pgoff_t index);
+static inline void ufs_put_locked_folio(struct folio *folio)
+{
+	folio_unlock(folio);
+	folio_put(folio);
+}
+
 static inline void ufs_put_locked_page(struct page *page)
 {
-	unlock_page(page);
-	put_page(page);
+	ufs_put_locked_folio(page_folio(page));
 }