linux/include/asm-sparc64/pgalloc.h
David S. Miller 74bf4312ff [SPARC64]: Move away from virtual page tables, part 1.
We now use the TSB hardware assist features of the UltraSPARC
MMUs.

SMP is currently knowingly broken; we need to find another place
to store the per-cpu base pointers.  We hid them away in the TSB
base register, and that obviously will not work any more :-)

Another known broken case is non-8KB base page size.

Also noticed that flush_tlb_all() is not referenced anywhere; only
the internal __flush_tlb_all() (local cpu only) is used by the
sparc64 port, so we can get rid of flush_tlb_all().

The kernel gets its own 8KB TSB (swapper_tsb) and each address space
gets its own private 8KB TSB.  Later we can add code to dynamically
increase the size of the per-process TSB as the RSS grows.  An 8KB TSB
is good enough for up to about a 4MB RSS, after which the TSB starts to
incur many capacity and conflict misses.
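
As a quick sanity check of that figure, here is a hypothetical
back-of-the-envelope calculation.  It assumes 16-byte TSB entries (an
8-byte tag plus an 8-byte PTE) and 8KB base pages; those sizes follow
the usual UltraSPARC TSB layout and are not spelled out in this patch.

/* Hypothetical sketch: why an 8KB TSB covers roughly a 4MB RSS. */
#include <stdio.h>

int main(void)
{
	unsigned long tsb_bytes  = 8UL * 1024;	/* one 8KB TSB */
	unsigned long entry_size = 16UL;	/* assumed: 8-byte tag + 8-byte PTE */
	unsigned long page_size  = 8UL * 1024;	/* 8KB base page size */
	unsigned long entries    = tsb_bytes / entry_size;	/* 512 entries */
	unsigned long coverage   = entries * page_size;		/* 4MB mapped */

	printf("%lu entries cover ~%lu MB of RSS\n", entries, coverage >> 20);
	return 0;
}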

We even accumulate OBP translations into the kernel TSB.

Another area for refinement is large page size support.  We could use
a secondary address space TSB to handle those.

Signed-off-by: David S. Miller <davem@davemloft.net>
2006-03-20 01:11:13 -08:00

/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists	local_cpu_data()
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache[2];
	unsigned int pgcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)

/* Push a freed pgd page onto the quicklist, threading the old list
 * head through the page's first word.
 */
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
	preempt_disable();
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
	preempt_enable();
}

/* Pop a cached pgd page if one is available, otherwise fall back to
 * allocating and zeroing a fresh page.
 */
static __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	if((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
		preempt_enable();
	} else {
		preempt_enable();
		ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
		if(ret)
			memset(ret, 0, PAGE_SIZE);
	}
	return (pgd_t *)ret;
}

static __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

/* XXX This crap can die, no longer using virtual page tables... */
#ifdef DCACHE_ALIASING_POSSIBLE
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif

#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)

/* pmd and pte pages share the two pte quicklists; on D-cache aliasing
 * machines the lists are split by cache color.
 */
static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;
	int color = 0;

	preempt_disable();
	if (pte_quicklist[color] == NULL)
		color = 1;

	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	preempt_enable();

	return (pmd_t *)ret;
}

static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = pmd_alloc_one_fast(mm, address);
	if (!pmd) {
		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		if (pmd)
			memset(pmd, 0, PAGE_SIZE);
	}
	return pmd;
}

static __inline__ void free_pmd_fast(pmd_t *pmd)
{
	unsigned long color = DCACHE_COLOR((unsigned long)pmd);

	preempt_disable();
	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
	pte_quicklist[color] = (unsigned long *) pmd;
	pgtable_cache_size++;
	preempt_enable();
}

static __inline__ void free_pmd_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)		\
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, addr);

	if (pte)
		return virt_to_page(pte);

	return NULL;
}

static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long color = VPTE_COLOR(address);
	unsigned long *ret;

	preempt_disable();
	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	preempt_enable();
	return (pte_t *)ret;
}

static __inline__ void free_pte_fast(pte_t *pte)
{
	unsigned long color = DCACHE_COLOR((unsigned long)pte);

	preempt_disable();
	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
	pte_quicklist[color] = (unsigned long *) pte;
	pgtable_cache_size++;
	preempt_enable();
}

static __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_pte_fast(pte);
}

static inline void pte_free(struct page *ptepage)
{
	free_pte_fast(page_address(ptepage));
}

#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

#endif /* _SPARC64_PGALLOC_H */
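
For illustration only, here is a minimal user-space sketch of the quicklist
pattern the fast-path allocators above rely on: a freed page is pushed onto a
LIFO list by storing the old list head in its first word, and popped by
reading that word back and clearing it.  The names quicklist_push() and
quicklist_pop() are invented for this sketch; the real code keeps its lists in
pgd_quicklist and pte_quicklist, counts pages in pgtable_cache_size, and
brackets each operation with preempt_disable()/preempt_enable().

/* Stand-alone sketch of the quicklist freelist; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE	8192	/* stand-in for an 8KB page */

static unsigned long *quicklist;	/* head of the LIFO freelist */

/* Push a free "page": its first word becomes the link to the old head. */
static void quicklist_push(unsigned long *page)
{
	*page = (unsigned long) quicklist;
	quicklist = page;
}

/* Pop the most recently freed "page", or return NULL if the list is empty. */
static unsigned long *quicklist_pop(void)
{
	unsigned long *page = quicklist;

	if (page) {
		quicklist = (unsigned long *) *page;
		page[0] = 0;	/* clear the link word, as get_pgd_fast() does */
	}
	return page;
}

int main(void)
{
	unsigned long *a = calloc(1, FAKE_PAGE_SIZE);
	unsigned long *b = calloc(1, FAKE_PAGE_SIZE);

	quicklist_push(a);
	quicklist_push(b);
	printf("pop %p (expect b = %p)\n", (void *) quicklist_pop(), (void *) b);
	printf("pop %p (expect a = %p)\n", (void *) quicklist_pop(), (void *) a);

	free(a);
	free(b);
	return 0;
}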