mm/mm_init.c: remove arch_reserved_kernel_pages()
Since the current calculation in calc_nr_kernel_pages() already takes kernel reserved memory into consideration, there is no need to keep arch_reserved_kernel_pages() any more. Link: https://lkml.kernel.org/r/20240325145646.1044760-7-bhe@redhat.com Signed-off-by: Baoquan He <bhe@redhat.com> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
90e796e22e
commit
0b52663f75
@@ -406,9 +406,5 @@ extern void *abatron_pteptrs[2];
 #include <asm/nohash/mmu.h>
 #endif
 
-#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
-#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
@@ -1735,8 +1735,3 @@ static void __init fadump_reserve_crash_area(u64 base)
 		memblock_reserve(mstart, msize);
 	}
 }
-
-unsigned long __init arch_reserved_kernel_pages(void)
-{
-	return memblock_reserved_size() / PAGE_SIZE;
-}
@@ -3261,9 +3261,6 @@ static inline void show_mem(void)
 extern long si_mem_available(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
-#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
-extern unsigned long arch_reserved_kernel_pages(void);
-#endif
 
 extern __printf(3, 4)
 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
mm/mm_init.c (12 lines changed)
@@ -2374,17 +2374,6 @@ void __init page_alloc_init_late(void)
 	page_alloc_sysctl_init();
 }
 
-#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
-/*
- * Returns the number of pages that arch has reserved but
- * is not known to alloc_large_system_hash().
- */
-static unsigned long __init arch_reserved_kernel_pages(void)
-{
-	return 0;
-}
-#endif
-
 /*
  * Adaptive scale is meant to reduce sizes of hash tables on large memory
  * machines. As memory size is increased the scale is also increased but at
@@ -2427,7 +2416,6 @@ void *__init alloc_large_system_hash(const char *tablename,
 	if (!numentries) {
 		/* round applicable memory size up to nearest megabyte */
 		numentries = nr_kernel_pages;
-		numentries -= arch_reserved_kernel_pages();
 
 		/* It isn't necessary when PAGE_SIZE >= 1MB */
 		if (PAGE_SIZE < SZ_1M)
Loading…
Reference in New Issue
Block a user