/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/k8.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;

unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

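/*
 * memnodemap[] is a simple physical-address-to-node hash: entry
 * (addr >> memnode_shift) holds the id of the node owning that chunk of
 * the address space.  Illustrative example (not a real configuration):
 * with memnode_shift == 24 each entry covers a 16MB chunk, so address
 * 0x12345678 resolves to memnodemap[0x12].
 */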
/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost RAM (shift too big)
 */
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
	int i;
	int res = -1;
	unsigned long addr, end;

	memset(memnodemap, 0xff, memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != 0xff)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}

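/*
 * Maps of up to 48 bytes live in memnode.embedded_map; larger maps are
 * carved out of free e820 space and aligned to L1_CACHE_BYTES so that
 * hot lookups do not straddle cache lines.
 */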
static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long pad, pad_addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= 48)
		return 0;

	pad = L1_CACHE_BYTES - 1;
	pad_addr = 0x8000;
	nodemap_size = pad + memnodemapsize;
	nodemap_addr = find_e820_area(pad_addr, end_pfn << PAGE_SHIFT,
				      nodemap_size);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	pad_addr = (nodemap_addr + pad) & ~pad;
	memnodemap = phys_to_virt(pad_addr);

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}

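/*
 * Illustrative example: nodes starting at 0x100000000 and 0x240000000 OR
 * together to a bitfield of 0x340000000, whose lowest set bit is bit 30,
 * so every node start is aligned to a 1GB chunk and a shift of 30 is the
 * largest that keeps the per-chunk map exact.
 */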
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init
extract_lsb_from_nodes(const struct bootnode *nodes, int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
	return i;
}

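/*
 * Three steps, as implemented below: derive the widest usable shift from
 * the node boundaries, size and allocate memnodemap[] to match, then
 * populate it.  Returns the shift, or -1 on failure.
 */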
int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
		shift);

	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO
	"Your memory is not aligned, you need to rebuild your kernel "
	"with a bigger NODEMAPSIZE, shift=%d\n",
			shift);
		return -1;
	}
	return shift;
}

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif

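/*
 * Grab 'size' bytes of early memory for node data: prefer free e820 space
 * inside [start, end), falling back to the generic bootmem allocator
 * above MAX_DMA_ADDRESS.
 */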
static void * __init
early_node_mem(int nodeid, unsigned long start, unsigned long end,
	       unsigned long size)
{
	unsigned long mem = find_e820_area(start, end, size);
	void *ptr;

	if (mem != -1L)
		return __va(mem);
	ptr = __alloc_bootmem_nopanic(size,
				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
	if (ptr == NULL) {
		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
		       size, nodeid);
		return NULL;
	}
	return ptr;
}

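/*
 * Per-node layout built below: the pg_data_t sits at the start of the
 * node, the bootmem bitmap starts on the next page boundary after it,
 * and both regions are reserved before the node is marked online.
 */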
/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
	unsigned long nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
	if (bootmap == NULL) {
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem((unsigned long)node_data[nodeid], pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);
	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	free_bootmem_with_active_regions(nodeid, end);

	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}

/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
	unsigned long start_pfn, end_pfn, memmapsize, limit;

	start_pfn = node_start_pfn(nodeid);
	end_pfn = node_end_pfn(nodeid);

	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
		nodeid, start_pfn, end_pfn);

	/* Try to allocate mem_map at end to not fill up precious <4GB
	   memory. */
	memmapsize = sizeof(struct page) * (end_pfn - start_pfn);
	limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	NODE_DATA(nodeid)->node_mem_map =
		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
				memmapsize, SMP_CACHE_BYTES,
				round_down(limit - memmapsize, PAGE_SIZE),
				limit);
#endif
}

void __init numa_init_array(void)
{
	int rr, i;
	/* There are unfortunately some poorly designed mainboards around
	   that only connect memory to a single CPU. This breaks the 1:1
	   cpu->node mapping. To avoid this, fill in the mapping for all
	   possible CPUs, as the number of CPUs is not known yet.
	   We round robin the existing nodes. */
	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

#ifdef CONFIG_NUMA_EMU
/* NUMA emulation */
char *cmdline __initdata;

/*
 * Sets up nid to range from addr to addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is 0
 * if there is additional memory left for allocation past addr and -1 otherwise.
 * addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
				   u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}

/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start,
				      int num_nodes)
{
	unsigned int big;
	u64 size;
	int i;

	if (num_nodes <= 0)
		return -1;
	if (num_nodes > MAX_NUMNODES)
		num_nodes = MAX_NUMNODES;
	size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
	       num_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the leftovers.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
	      FAKE_NODE_MIN_SIZE;

	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		printk(KERN_ERR "Not enough memory for each node.  "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = node_start; i < num_nodes + node_start; i++) {
		u64 end = *addr + size;

		if (i < big)
			end += FAKE_NODE_MIN_SIZE;
		/*
		 * The final node can have the remaining system RAM.  Other
		 * nodes receive roughly the same amount of available pages.
		 */
		if (i == num_nodes + node_start - 1)
			end = max_addr;
		else
			while (end - *addr - e820_hole_size(*addr, end) <
			       size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > max_addr) {
					end = max_addr;
					break;
				}
			}
		if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
			break;
	}
	return i - node_start + 1;
}

/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start, u64 size)
{
	int i = node_start;

	size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
	while (!setup_node_range(i++, nodes, addr, size, max_addr))
		;
	return i - node_start;
}

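/*
 * Illustrative numa=fake usage, as parsed below: "numa=fake=4" splits
 * RAM into 4 equal nodes, while "numa=fake=2*512" asks for two 512MB
 * nodes.  Sizes are in megabytes and are rounded down to
 * FAKE_NODE_MIN_SIZE; any remaining RAM is assigned according to the
 * last character of the spec.
 */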
/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	struct bootnode nodes[MAX_NUMNODES];
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = end_pfn << PAGE_SHIFT;
	int num_nodes = 0;
	int coeff_flag;
	int coeff = -1;
	int num = 0;
	u64 size;
	int i;

	memset(&nodes, 0, sizeof(nodes));
	/*
	 * If the numa=fake command-line is just a single number N, split the
	 * system RAM into N fake nodes.
	 */
	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
						simple_strtol(cmdline, NULL, 0));
		if (num_nodes < 0)
			return num_nodes;
		goto out;
	}

	/* Parse the command line. */
	for (coeff_flag = 0; ; cmdline++) {
		if (*cmdline && isdigit(*cmdline)) {
			num = num * 10 + *cmdline - '0';
			continue;
		}
		if (*cmdline == '*') {
			if (num > 0)
				coeff = num;
			coeff_flag = 1;
		}
		if (!*cmdline || *cmdline == ',') {
			if (!coeff_flag)
				coeff = 1;
			/*
			 * Round down to the nearest FAKE_NODE_MIN_SIZE.
			 * Command-line coefficients are in megabytes.
			 */
			size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
			if (size)
				for (i = 0; i < coeff; i++, num_nodes++)
					if (setup_node_range(num_nodes, nodes,
						&addr, size, max_addr) < 0)
						goto done;
			if (!*cmdline)
				break;
			coeff_flag = 0;
			coeff = -1;
		}
		num = 0;
	}
done:
	if (!num_nodes)
		return -1;
	/* Fill remainder of system RAM, if appropriate. */
	if (addr < max_addr) {
		if (coeff_flag && coeff < 0) {
			/* Split remaining nodes into num-sized chunks */
			num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
							 num_nodes, num);
			goto out;
		}
		switch (*(cmdline - 1)) {
		case '*':
			/* Split remaining nodes into coeff chunks */
			if (coeff <= 0)
				break;
			num_nodes += split_nodes_equally(nodes, &addr, max_addr,
							 num_nodes, coeff);
			break;
		case ',':
			/* Do not allocate remaining system RAM */
			break;
		default:
			/* Give one final node */
			setup_node_range(num_nodes, nodes, &addr,
					 max_addr - addr, max_addr);
			num_nodes++;
		}
	}
out:
	memnode_shift = compute_hash_shift(nodes, num_nodes);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered by
	 * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
	 * true.  NUMA emulation has succeeded so we will not scan ACPI nodes.
	 */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	acpi_numa = -1;
#endif
	for_each_node_mask(i, node_possible_map) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	acpi_fake_nodes(nodes, num_nodes);
	numa_init_array();
	return 0;
}

#endif /* CONFIG_NUMA_EMU */

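/*
 * Pick a NUMA configuration, trying each source in turn: numa=fake
 * emulation, ACPI SRAT, then the K8 northbridge registers, and finally
 * a single dummy node covering all memory.
 */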
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, end_pfn))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	node_to_cpumask[0] = cpumask_of_cpu(0);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

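/* Add a CPU to the cpumask of its node. */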
__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

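/* Record a CPU's node both in its PDA and in the cpu_to_node[] table. */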
void __cpuinit numa_set_node(int cpu, int node)
{
	cpu_pda(cpu)->nodenumber = node;
	cpu_to_node(cpu) = node;
}

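/*
 * Release every online node's bootmem pages to the page allocator and
 * return the total number of pages freed.
 */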
unsigned long __init numa_free_all_bootmem(void)
{
	int i;
	unsigned long pages = 0;

	for_each_online_node(i) {
		pages += free_all_bootmem_node(NODE_DATA(i));
	}
	return pages;
}

void __init paging_init(void)
{
	int i;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	for_each_online_node(i) {
		setup_node_zones(i);
	}

	free_area_init_nodes(max_zone_pfns);
}

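/*
 * Early "numa=" options handled below: "off" disables NUMA, "fake=<spec>"
 * enables emulation, "noacpi" skips SRAT parsing, and "hotadd=<percent>"
 * sets the hot-add reservation percentage used by the SRAT code.
 */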
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}

early_param("numa", numa_setup);

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the faking-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		u8 apicid = x86_cpu_to_apicid_init[i];

		if (apicid == BAD_APICID)
			continue;
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;
		numa_set_node(i, apicid_to_node[apicid]);
	}
}

|
|
|
EXPORT_SYMBOL(cpu_to_node);
|
|
|
|
EXPORT_SYMBOL(node_to_cpumask);
|
|
|
|
EXPORT_SYMBOL(node_data);
|
2006-01-11 14:46:27 -07:00
|
|
|
|
|
|
|
#ifdef CONFIG_DISCONTIGMEM
|
|
|
|
/*
|
|
|
|
* Functions to convert PFNs from/to per node page addresses.
|
|
|
|
* These are out of line because they are quite big.
|
|
|
|
* They could be all tuned by pre caching more state.
|
|
|
|
* Should do that.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int pfn_valid(unsigned long pfn)
|
|
|
|
{
|
|
|
|
unsigned nid;
|
|
|
|
if (pfn >= num_physpages)
|
|
|
|
return 0;
|
|
|
|
nid = pfn_to_nid(pfn);
|
|
|
|
if (nid == 0xff)
|
|
|
|
return 0;
|
|
|
|
return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(pfn_valid);
|
|
|
|
#endif
|