mm/mm_init.c: not always search next deferred_init_pfn from very beginning
In function deferred_init_memmap(), we call deferred_init_mem_pfn_range_in_zone() to get the next deferred_init_pfn, but the search always restarts from the very beginning of the zone's ranges. Since the iterator index is already saved in i, we can leverage it to resume the search from i on the next call instead of rescanning ranges that are known to be initialized. [rppt refine the comment] Signed-off-by: Wei Yang <richard.weiyang@gmail.com> Link: https://lore.kernel.org/all/20240605071339.15330-1-richard.weiyang@gmail.com Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
This commit is contained in:
parent
544b8e14c2
commit
f1180fd2a7
@ -299,25 +299,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
|
|||||||
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
|
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
|
||||||
unsigned long *out_spfn,
|
unsigned long *out_spfn,
|
||||||
unsigned long *out_epfn);
|
unsigned long *out_epfn);
|
||||||
/**
|
|
||||||
* for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
|
|
||||||
* memblock areas
|
|
||||||
* @i: u64 used as loop variable
|
|
||||||
* @zone: zone in which all of the memory blocks reside
|
|
||||||
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
|
|
||||||
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
|
|
||||||
*
|
|
||||||
* Walks over free (memory && !reserved) areas of memblock in a specific
|
|
||||||
* zone. Available once memblock and an empty zone is initialized. The main
|
|
||||||
* assumption is that the zone start, end, and pgdat have been associated.
|
|
||||||
* This way we can use the zone to determine NUMA node, and if a given part
|
|
||||||
* of the memblock is valid for the zone.
|
|
||||||
*/
|
|
||||||
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \
|
|
||||||
for (i = 0, \
|
|
||||||
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \
|
|
||||||
i != U64_MAX; \
|
|
||||||
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
|
* for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
|
||||||
|
23
mm/mm_init.c
23
mm/mm_init.c
@ -2021,24 +2021,29 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This function is meant to pre-load the iterator for the zone init.
|
* This function is meant to pre-load the iterator for the zone init from
|
||||||
* Specifically it walks through the ranges until we are caught up to the
|
* a given point.
|
||||||
* first_init_pfn value and exits there. If we never encounter the value we
|
* Specifically it walks through the ranges starting with initial index
|
||||||
* return false indicating there are no valid ranges left.
|
* passed to it until we are caught up to the first_init_pfn value and
|
||||||
|
* exits there. If we never encounter the value we return false indicating
|
||||||
|
* there are no valid ranges left.
|
||||||
*/
|
*/
|
||||||
static bool __init
|
static bool __init
|
||||||
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
|
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
|
||||||
unsigned long *spfn, unsigned long *epfn,
|
unsigned long *spfn, unsigned long *epfn,
|
||||||
unsigned long first_init_pfn)
|
unsigned long first_init_pfn)
|
||||||
{
|
{
|
||||||
u64 j;
|
u64 j = *i;
|
||||||
|
|
||||||
|
if (j == 0)
|
||||||
|
__next_mem_pfn_range_in_zone(&j, zone, spfn, epfn);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Start out by walking through the ranges in this zone that have
|
* Start out by walking through the ranges in this zone that have
|
||||||
* already been initialized. We don't need to do anything with them
|
* already been initialized. We don't need to do anything with them
|
||||||
* so we just need to flush them out of the system.
|
* so we just need to flush them out of the system.
|
||||||
*/
|
*/
|
||||||
for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
|
for_each_free_mem_pfn_range_in_zone_from(j, zone, spfn, epfn) {
|
||||||
if (*epfn <= first_init_pfn)
|
if (*epfn <= first_init_pfn)
|
||||||
continue;
|
continue;
|
||||||
if (*spfn < first_init_pfn)
|
if (*spfn < first_init_pfn)
|
||||||
@ -2110,7 +2115,7 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
|
|||||||
{
|
{
|
||||||
unsigned long spfn, epfn;
|
unsigned long spfn, epfn;
|
||||||
struct zone *zone = arg;
|
struct zone *zone = arg;
|
||||||
u64 i;
|
u64 i = 0;
|
||||||
|
|
||||||
deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
|
deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
|
||||||
|
|
||||||
@ -2141,7 +2146,7 @@ static int __init deferred_init_memmap(void *data)
|
|||||||
unsigned long start = jiffies;
|
unsigned long start = jiffies;
|
||||||
struct zone *zone;
|
struct zone *zone;
|
||||||
int max_threads;
|
int max_threads;
|
||||||
u64 i;
|
u64 i = 0;
|
||||||
|
|
||||||
/* Bind memory initialisation thread to a local node if possible */
|
/* Bind memory initialisation thread to a local node if possible */
|
||||||
if (!cpumask_empty(cpumask))
|
if (!cpumask_empty(cpumask))
|
||||||
@ -2216,7 +2221,7 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
|
|||||||
unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
|
unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
|
||||||
unsigned long spfn, epfn, flags;
|
unsigned long spfn, epfn, flags;
|
||||||
unsigned long nr_pages = 0;
|
unsigned long nr_pages = 0;
|
||||||
u64 i;
|
u64 i = 0;
|
||||||
|
|
||||||
/* Only the last zone may have deferred pages */
|
/* Only the last zone may have deferred pages */
|
||||||
if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
|
if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
|
||||||
|
Loading…
Reference in New Issue
Block a user