commit 7d68dc3f10
Commit 916f676f8d
started reserving boot service code since some systems
require you to keep that code around until SetVirtualAddressMap is called.
However, in some cases those areas will overlap with reserved regions.
The proper medium-term fix is to fix the bootloader to prevent the
conflicts from occurring by moving the kernel to a better position,
but the kernel should check for this possibility, and only reserve regions
which can be reserved.
Signed-off-by: Maarten Lankhorst <m.b.lankhorst@gmail.com>
Link: http://lkml.kernel.org/r/4DF7A005.1050407@gmail.com
Acked-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
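The fix described above boils down to checking each boot-services range against what memblock has already reserved and registering only the part that can actually be reserved. The fragment below is a hedged sketch of that flow, not the hunk applied by this commit: the function name efi_reserve_one_boot_region() is made up for illustration, and the interpretation of the helper's return value (true when the range overlapped an existing reservation and had to be altered) is an assumption; only the declarations themselves come from the header further down.

/*
 * Minimal sketch of the "check, then reserve" flow for a single EFI
 * boot-services descriptor.  memblock_x86_check_reserved_size() and
 * memblock_x86_reserve_range() are declared in the header below; the
 * function name and the assumed return-value semantics of the check
 * helper are illustrative, not this commit's exact diff.
 */
#include <linux/efi.h>
#include <linux/init.h>
#include <asm/memblock.h>

static void __init efi_reserve_one_boot_region(efi_memory_desc_t *md)
{
	u64 start = md->phys_addr;
	u64 size  = md->num_pages << EFI_PAGE_SHIFT;

	if (md->type != EFI_BOOT_SERVICES_CODE &&
	    md->type != EFI_BOOT_SERVICES_DATA)
		return;

	/* Assumed semantics: true means the range collided with an
	 * existing reservation and could not be kept as-is, so skip it. */
	if (memblock_x86_check_reserved_size(&start, &size, 1 << EFI_PAGE_SHIFT)) {
		md->num_pages = 0;	/* nothing reservable left */
		return;
	}

	if (size)
		memblock_x86_reserve_range(start, start + size, "EFI Boot");
}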
24 lines · 906 B · C
#ifndef _X86_MEMBLOCK_H
#define _X86_MEMBLOCK_H

#define ARCH_DISCARD_MEMBLOCK

u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);

void memblock_x86_reserve_range(u64 start, u64 end, char *name);
void memblock_x86_free_range(u64 start, u64 end);
struct range;
int __get_free_all_memory_range(struct range **range, int nodeid,
			unsigned long start_pfn, unsigned long end_pfn);
int get_free_all_memory_range(struct range **rangep, int nodeid);

void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
					unsigned long last_pfn);
u64 memblock_x86_hole_size(u64 start, u64 end);
u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);

#endif
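For readers who have not looked at the memblock internals, the standalone program below illustrates the kind of range trimming a check such as memblock_x86_check_reserved_size() has to perform: clamp a candidate [addr, addr + size) range so it no longer overlaps already-reserved regions, and report whether it was changed. The reserved[] table and trim_against_reserved() helper are hypothetical stand-ins for this sketch; the real kernel code walks memblock's reserved region list instead.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for memblock's reserved regions. */
struct region { uint64_t base, size; };

static const struct region reserved[] = {
	{ 0x00100000, 0x00400000 },	/* e.g. kernel image */
	{ 0x7f000000, 0x00100000 },	/* e.g. firmware tables */
};

/*
 * Shrink *addrp/*sizep so the range no longer overlaps any reserved
 * region; returns true if it had to be changed.  Illustrative only:
 * it keeps just the leading non-overlapping part of the range.
 */
static bool trim_against_reserved(uint64_t *addrp, uint64_t *sizep)
{
	uint64_t addr = *addrp, end = *addrp + *sizep;
	bool changed = false;

	for (size_t i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		uint64_t rb = reserved[i].base, re = rb + reserved[i].size;

		if (addr >= re || end <= rb)
			continue;		/* no overlap with this region */
		if (addr >= rb) {		/* range starts inside a reservation */
			*sizep = 0;
			return true;
		}
		end = rb;			/* keep only the leading part */
		changed = true;
	}
	if (changed) {
		*addrp = addr;
		*sizep = end - addr;
	}
	return changed;
}

int main(void)
{
	uint64_t addr = 0x000ff000, size = 0x00200000;

	if (trim_against_reserved(&addr, &size))
		printf("trimmed to [%#llx, %#llx)\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size));
	return 0;
}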