
mm: shmem: move shmem_huge_global_enabled() into shmem_allowable_huge_orders()

Move shmem_huge_global_enabled() into shmem_allowable_huge_orders(), so
that shmem_allowable_huge_orders() can also be used to find the allowable
huge orders for tmpfs.  Moreover, shmem_huge_global_enabled() can become
static.  While we are at it, passing the vma instead of the mm to
shmem_huge_global_enabled() makes the code cleaner.

No functional changes.

Link: https://lkml.kernel.org/r/8e825146bb29ee1a1c7bd64d2968ff3e19be7815.1721626645.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 6beeab870e
parent d58a2a581f
Baolin Wang, 2024-07-22 13:43:19 +08:00 (committed by Andrew Morton)

 3 files changed, 35 insertions(+), 36 deletions(-)
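
The caller-side effect is easiest to see condensed.  A rough before/after
sketch of the shmem_get_folio_gfp() call site, distilled from the hunks
below (an illustration, not the literal kernel code):

	/* Before: callers queried the global policy, then branched. */
	huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
					 vma ? vma->vm_flags : 0);
	if (vma && vma_is_anon_shmem(vma))
		orders = shmem_allowable_huge_orders(inode, vma, index, huge);
	else if (huge)
		orders = BIT(HPAGE_PMD_ORDER);

	/* After: one call; the global check runs inside, and a NULL or
	 * non-anon-shmem vma takes the PMD-only tmpfs path. */
	orders = shmem_allowable_huge_orders(inode, vma, index, false);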

--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -111,21 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
-				      struct mm_struct *mm, unsigned long vm_flags);
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
-				bool global_huge);
+				bool shmem_huge_force);
 #else
-static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-						      bool shmem_huge_force, struct mm_struct *mm,
-						      unsigned long vm_flags)
-{
-	return false;
-}
 static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
-				bool global_huge)
+				bool shmem_huge_force)
 {
 	return 0;
 }

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -159,16 +159,10 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 	 * Must be done before hugepage flags check since shmem has its
 	 * own flags.
 	 */
-	if (!in_pf && shmem_file(vma->vm_file)) {
-		bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
-							     vma->vm_pgoff, !enforce_sysfs,
-							     vma->vm_mm, vm_flags);
-
-		if (!vma_is_anon_shmem(vma))
-			return global_huge ? orders : 0;
-		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
-						   vma, vma->vm_pgoff, global_huge);
-	}
+	if (!in_pf && shmem_file(vma->vm_file))
+		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
+						   vma, vma->vm_pgoff,
+						   !enforce_sysfs);
 
 	if (!vma_is_anonymous(vma)) {
 		/*
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -549,9 +549,10 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
 static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-					bool shmem_huge_force, struct mm_struct *mm,
+					bool shmem_huge_force, struct vm_area_struct *vma,
 					unsigned long vm_flags)
 {
+	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
 	loff_t i_size;
 
 	if (!S_ISREG(inode->i_mode))
@@ -581,15 +582,15 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 	}
 }
 
-bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-			       bool shmem_huge_force, struct mm_struct *mm,
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+			       bool shmem_huge_force, struct vm_area_struct *vma,
 			       unsigned long vm_flags)
 {
 	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
 		return false;
 
 	return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
-					   mm, vm_flags);
+					   vma, vm_flags);
 }
 
 #if defined(CONFIG_SYSFS)
@@ -772,6 +773,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 {
 	return 0;
 }
+
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+				      bool shmem_huge_force, struct vm_area_struct *vma,
+				      unsigned long vm_flags)
+{
+	return false;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
@@ -1625,22 +1633,33 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
-				bool global_huge)
+				bool shmem_huge_force)
 {
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
-	unsigned long vm_flags = vma->vm_flags;
+	unsigned long vm_flags = vma ? vma->vm_flags : 0;
+	bool global_huge;
 	loff_t i_size;
 	int order;
 
-	if ((vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+	if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
 		return 0;
 
 	/* If the hardware/firmware marked hugepage support disabled. */
 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
 		return 0;
 
+	global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
+						vma, vm_flags);
+	if (!vma || !vma_is_anon_shmem(vma)) {
+		/*
+		 * For tmpfs, we now only support PMD sized THP if huge page
+		 * is enabled, otherwise fallback to order 0.
+		 */
+		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+	}
+
 	/*
	 * Following the 'deny' semantics of the top level, force the huge
	 * option off from all mounts.
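
With the global check folded in, tmpfs can now reuse the same entry point
by passing a NULL vma.  A condensed illustration of what the new tmpfs
path above amounts to (not code from this patch):

	/* tmpfs: no vma, so the anon-shmem per-order masks don't apply */
	orders = shmem_allowable_huge_orders(inode, NULL, index, shmem_huge_force);
	/* orders is BIT(HPAGE_PMD_ORDER) if the global huge policy allows
	 * it for this inode/index, otherwise 0 */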
@@ -2077,7 +2096,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	struct mm_struct *fault_mm;
 	struct folio *folio;
 	int error;
-	bool alloced, huge;
+	bool alloced;
 	unsigned long orders = 0;
 
 	if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
@@ -2150,14 +2169,8 @@ repeat:
 		return 0;
 	}
 
-	huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
-					 vma ? vma->vm_flags : 0);
-
-	/* Find hugepage orders that are allowed for anonymous shmem. */
-	if (vma && vma_is_anon_shmem(vma))
-		orders = shmem_allowable_huge_orders(inode, vma, index, huge);
-	else if (huge)
-		orders = BIT(HPAGE_PMD_ORDER);
+	/* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
+	orders = shmem_allowable_huge_orders(inode, vma, index, false);
 
 	if (orders > 0) {
 		gfp_t huge_gfp;