linux/arch/powerpc/mm/slb.c
David Gibson 14c89e7fc8 [PATCH] powerpc: Replace VMALLOCBASE with VMALLOC_START
On ppc64, we independently define VMALLOCBASE and VMALLOC_START to be
the same thing: the start of the vmalloc() area at 0xd000000000000000.
VMALLOC_START is used much more widely, including in generic code, so
this patch gets rid of the extraneous VMALLOCBASE.

This does require moving the definitions of region IDs from page_64.h
to pgtable.h, but they don't clearly belong in the former rather than
the latter, anyway.  While we're moving them, clean up the definitions
of the REGION_IDs:
	- Abolish REGION_SIZE; it was only used once, to define
REGION_MASK anyway.
	- Define the specific region IDs in terms of the REGION_ID()
macro.
	- Define KERNEL_REGION_ID in terms of PAGE_OFFSET rather than
KERNELBASE.  It amounts to the same thing, but conceptually this is
about the region of the linear mapping (which starts at PAGE_OFFSET)
rather than of the kernel text itself (which is at KERNELBASE).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-01-09 15:05:47 +11:00
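
For reference, here is a sketch of what the reworked definitions in pgtable.h look like after this change. Treat it as an illustration of the scheme described above rather than the verbatim header contents; the 60-bit shift reflects the ppc64 layout, in which the top four bits of an effective address select the region:

#define REGION_SHIFT      60UL
#define REGION_MASK       (0xfUL << REGION_SHIFT)
#define REGION_ID(ea)     (((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))  /* region 0xd */
#define KERNEL_REGION_ID  (REGION_ID(PAGE_OFFSET))    /* region 0xc */
#define USER_REGION_ID    (0UL)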


/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

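/* Build the ESID (effective segment ID) word of an SLB entry: the
 * segment bits of the effective address, plus the valid bit and the
 * index of the slot the entry is destined for. */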
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

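/* Build the VSID (virtual segment ID) word of an SLB entry for a
 * kernel address, combining the computed VSID with the given
 * protection and page-size flags. */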
static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

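/* Write one SLB entry into the given slot with the slbmte instruction. */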
static inline void create_slbe(unsigned long ea, unsigned long flags,
                               unsigned long entry)
{
        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
                       "r" (mk_esid_data(ea, entry))
                     : "memory" );
}

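/* Empty the SLB and re-create the bolted entries. Slot 0 (the linear
 * mapping segment we are executing from) is left alone by slbia, so
 * only the vmalloc segment and the kernel stack are re-entered here. */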
static void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, virtual_llp, lflags, vflags;
        unsigned long ksp_esid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | virtual_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
        if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                ksp_esid_data &= ~SLB_ESID_V;

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, 1)),
                        "r"(mk_vsid_data(ksp_esid_data, lflags)),
                        "r"(ksp_esid_data)
                     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long esid_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        esid_data = ((unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT) | SLBIE_C;
                        asm volatile("slbie %0" : : "r" (esid_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (esid_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;
#ifdef CONFIG_PPC_64K_PAGES
        get_paca()->pgdir = mm->pgd;
#endif /* CONFIG_PPC_64K_PAGES */

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}

void slb_initialize(void)
{
        unsigned long linear_llp, virtual_llp;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_virtual;
        extern unsigned int *slb_miss_user_load_normal;
#ifdef CONFIG_HUGETLB_PAGE
        extern unsigned int *slb_miss_user_load_huge;
        unsigned long huge_llp;

        huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_virtual,
                                   SLB_VSID_KERNEL | virtual_llp);
                patch_slb_encoding(slb_miss_user_load_normal,
                                   SLB_VSID_USER | virtual_llp);

                DBG("SLB: linear LLP = %04x\n", linear_llp);
                DBG("SLB: virtual LLP = %04x\n", virtual_llp);
#ifdef CONFIG_HUGETLB_PAGE
                patch_slb_encoding(slb_miss_user_load_huge,
                                   SLB_VSID_USER | huge_llp);
                DBG("SLB: huge LLP = %04x\n", huge_llp);
#endif
        }

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
        {
                unsigned long lflags, vflags;

                lflags = SLB_VSID_KERNEL | linear_llp;
                vflags = SLB_VSID_KERNEL | virtual_llp;

                /* Invalidate the entire SLB (even slot 0) & all the ERATS */
                asm volatile("isync":::"memory");
                asm volatile("slbmte %0,%0"::"r" (0) : "memory");
                asm volatile("isync; slbia; isync":::"memory");
                create_slbe(PAGE_OFFSET, lflags, 0);

                /* VMALLOC space has 4K pages always for now */
                create_slbe(VMALLOC_START, vflags, 1);

                /* We don't bolt the stack for the time being - we're in boot,
                 * so the stack is in the bolted segment.  By the time it goes
                 * elsewhere, we'll call _switch() which will bolt in the new
                 * one. */
                asm volatile("isync":::"memory");
        }
#endif /* CONFIG_PPC_ISERIES */

        get_paca()->stab_rr = SLB_NUM_BOLTED;
}