
mm: drop CONFIG_HAVE_ARCH_NODEDATA_EXTENSION

There are no users of HAVE_ARCH_NODEDATA_EXTENSION left, so
arch_alloc_nodedata() and arch_refresh_nodedata() are not needed anymore.

Replace the call to arch_alloc_nodedata() in free_area_init() with a new
helper, alloc_offline_node_data(), remove the call to arch_refresh_nodedata(),
and clean up the associated ifdefery in include/linux/memory_hotplug.h.
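
In short, the open-coded pgdat allocation in free_area_init() collapses into
a single generic helper; a condensed view of the hunks below:

	void __init alloc_offline_node_data(int nid)
	{
		pg_data_t *pgdat;

		pgdat = memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);
		if (!pgdat)
			panic("Cannot allocate %zuB for node %d.\n",
			      sizeof(*pgdat), nid);

		node_data[nid] = pgdat;
	}

	/* free_area_init() then only needs: */
	if (!node_online(nid))
		alloc_offline_node_data(nid);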

Link: https://lkml.kernel.org/r/20240807064110.1003856-9-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Tested-by: Zi Yan <ziy@nvidia.com> # for x86_64 and arm64
Acked-by: Dan Williams <dan.j.williams@intel.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Rob Herring (Arm) <robh@kernel.org>
Cc: Samuel Holland <samuel.holland@sifive.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Mike Rapoport (Microsoft), 2024-08-07 09:40:52 +03:00; committed by Andrew Morton
commit ec164cf1dd (parent 46bcce5031)
4 changed files with 18 additions and 56 deletions


@@ -16,54 +16,6 @@ struct resource;
 struct vmem_altmap;
 struct dev_pagemap;
 
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
-
-#ifdef CONFIG_NUMA
-/*
- * XXX: node aware allocation can't work well to get new node's memory at this time.
- *	Because, pgdat for the new node is not allocated/initialized yet itself.
- *	To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid)				\
-({								\
-	memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);	\
-})
-
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-	node_data[nid] = pgdat;
-}
-
-#else /* !CONFIG_NUMA */
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
-	BUG();
-	return NULL;
-}
-
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 struct page *pfn_to_online_page(unsigned long pfn);


@@ -33,6 +33,8 @@ static inline bool numa_valid_node(int nid)
 extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)	(node_data[nid])
 
+void __init alloc_offline_node_data(int nid);
+
 /* Generic implementation available */
 int numa_nearest_node(int node, unsigned int state);
@@ -60,6 +62,8 @@ static inline int phys_to_target_node(u64 start)
 {
 	return 0;
 }
+
+static inline void alloc_offline_node_data(int nid) {}
 #endif
 
 #define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)


@@ -1835,14 +1835,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
 	for_each_node(nid) {
 		pg_data_t *pgdat;
 
-		if (!node_online(nid)) {
-			/* Allocator not initialized yet */
-			pgdat = arch_alloc_nodedata(nid);
-			if (!pgdat)
-				panic("Cannot allocate %zuB for node %d.\n",
-				       sizeof(*pgdat), nid);
-			arch_refresh_nodedata(nid, pgdat);
-		}
+		if (!node_online(nid))
+			alloc_offline_node_data(nid);
 
 		pgdat = NODE_DATA(nid);
 		free_area_init_node(nid);


@@ -6,6 +6,18 @@
 struct pglist_data *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 
+void __init alloc_offline_node_data(int nid)
+{
+	pg_data_t *pgdat;
+
+	pgdat = memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);
+	if (!pgdat)
+		panic("Cannot allocate %zuB for node %d.\n",
+		      sizeof(*pgdat), nid);
+
+	node_data[nid] = pgdat;
+}
+
 /* Stub functions: */
 #ifndef memory_add_physaddr_to_nid