ARM: 9419/1: mm: Fix kernel memory mapping for xip kernels
The patchset that introduced the kernel_sec_start/kernel_sec_end variables
to separate the kernel/lowmem memory mappings broke the mapping of kernel
memory for XIP kernels.

For XIP kernels, kernel_sec_start/kernel_sec_end reside in the RO area
before the MMU is switched on, so they cannot be set early in boot in
head.S. Fix this by setting them after the MMU is switched on.

XIP kernels need two different mappings: one for the kernel text (starting
at CONFIG_XIP_PHYS_ADDR) and one for the data (starting at
CONFIG_PHYS_OFFSET).

Also, move the kernel code mapping from devicemaps_init() to map_kernel().
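For illustration, a condensed sketch of the two mappings map_kernel() ends
up creating for an XIP kernel after this change (paraphrased from the hunks
below; the MT_MEMORY_RW type for the RAM mapping comes from the existing
tail of map_kernel() that the diff context does not show):

#ifdef CONFIG_XIP_KERNEL
	/* Kernel text executes in place from flash, mapped at MODULES_VADDR */
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);

	/* Kernel RW data lives in RAM; kernel_sec_start/end are set in paging_init() */
	map.pfn = __phys_to_pfn(kernel_sec_start);	/* CONFIG_PHYS_OFFSET & SECTION_MASK */
	map.virtual = __phys_to_virt(kernel_sec_start);
	map.length = kernel_sec_end - kernel_sec_start;
	map.type = MT_MEMORY_RW;
	create_mapping(&map);
#endif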
Fixes: a91da54570 ("ARM: 9089/1: Define kernel physical section start and end")
Signed-off-by: Harith George <harith.g@alifsemi.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
commit ed6cbe6e55
parent 9852d85ec9
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -252,11 +252,15 @@ __create_page_tables:
 	 */
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	ldr	r6, =(_end - 1)
+
+	/* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
+#ifndef CONFIG_XIP_KERNEL
 	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
 #else
 	str	r8, [r5]			@ Save physical start of kernel (LE)
 #endif
+#endif
 	orr	r3, r8, r7			@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
@@ -264,6 +268,7 @@ __create_page_tables:
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	bls	1b
+#ifndef CONFIG_XIP_KERNEL
 	eor	r3, r3, r7			@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
@@ -271,8 +276,7 @@ __create_page_tables:
 #else
 	str	r3, [r5]			@ Save physical end of kernel (LE)
 #endif
-
-#ifdef CONFIG_XIP_KERNEL
+#else
 	/*
 	 * Map the kernel image separately as it is not located in RAM.
 	 */
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1402,18 +1402,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 		create_mapping(&map);
 	}
 
-	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the modulearea.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
 	/*
 	 * Map the cache flushing regions.
 	 */
@@ -1603,12 +1591,27 @@ static void __init map_kernel(void)
 	 * This will only persist until we turn on proper memory management later on
 	 * and we remap the whole kernel with page granularity.
	 */
+#ifdef CONFIG_XIP_KERNEL
+	phys_addr_t kernel_nx_start = kernel_sec_start;
+#else
 	phys_addr_t kernel_x_start = kernel_sec_start;
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 	phys_addr_t kernel_nx_start = kernel_x_end;
+#endif
 	phys_addr_t kernel_nx_end = kernel_sec_end;
 	struct map_desc map;
 
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the modulearea.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULES_VADDR;
+	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#else
 	map.pfn = __phys_to_pfn(kernel_x_start);
 	map.virtual = __phys_to_virt(kernel_x_start);
 	map.length = kernel_x_end - kernel_x_start;
@@ -1618,7 +1621,7 @@ static void __init map_kernel(void)
 	/* If the nx part is small it may end up covered by the tail of the RWX section */
 	if (kernel_x_end == kernel_nx_end)
 		return;
-
+#endif
 	map.pfn = __phys_to_pfn(kernel_nx_start);
 	map.virtual = __phys_to_virt(kernel_nx_start);
 	map.length = kernel_nx_end - kernel_nx_start;
@@ -1764,6 +1767,11 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
+#ifdef CONFIG_XIP_KERNEL
+	/* Store the kernel RW RAM region start/end in these variables */
+	kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
+	kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
+#endif
 	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);
 