linux/arch/ia64/mm/hugetlbpage.c
Jack Steiner 34d8a380d7 GRU Driver: hardware data structures
This series of patches adds a driver for the SGI UV GRU.  The driver is
still in development but it currently compiles for both x86_64 & IA64.
All simple regression tests pass on IA64.  Although features remain to be
added, I'd like to start the process of getting the driver into the
kernel.  Additional kernel drivers will depend on services provided by the
GRU driver.

The GRU is a hardware resource located in the system chipset.  The GRU
contains memory that is mmaped into the user address space.  This memory
is used to communicate with the GRU to perform functions such as
load/store, scatter/gather, bcopy, AMOs, etc.  The GRU is directly
accessed by user instructions using user virtual addresses.  GRU
instructions (ex., bcopy) use user virtual addresses for operands.
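
As a rough sketch of this access model (a hypothetical illustration only: the
device path, mapping size, and offset below are made up and are not the
driver's actual interface), a process would open the driver, mmap a GRU
context into its address space, and then touch that memory directly:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/gru", O_RDWR);	/* device node name is illustrative */
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Map GRU resources into the user address space; ordinary loads
	 * and stores through this mapping reach the chipset directly. */
	size_t len = 64 * 1024;			/* size is illustrative */
	void *gru = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (gru == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	/* GRU instructions (e.g. bcopy) would take user virtual addresses
	 * such as 'gru' as operands. */
	munmap(gru, len);
	close(fd);
	return EXIT_SUCCESS;
}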

The GRU contains a large TLB that is functionally very similar to
processor TLBs.  Because this external TLB caches user virtual address
translations, it requires callouts from the core VM system when certain types
of changes are made to the process page tables.  There are several MMUOPS
patches currently being discussed but none has been accepted into the
kernel.  The GRU driver is built using version V18 from Andrea Arcangeli.
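
For background, those callouts are the mmu-notifier work referenced above. A
minimal sketch of how a driver with an external TLB might consume them is
shown below (the callback signatures have varied across kernel versions, and
gru_flush_tlb_range() is a hypothetical stand-in, not a function from this
patch series):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical helper: drop GRU TLB entries covering [start, end). */
static void gru_flush_tlb_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* Would issue TLB-shootdown commands to the chipset here. */
}

static void gru_invalidate_range_start(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end)
{
	/* The core VM is about to change ptes in [start, end); make sure
	 * the external TLB holds no stale translations for them. */
	gru_flush_tlb_range(mm, start, end);
}

static const struct mmu_notifier_ops gru_mmuops = {
	.invalidate_range_start	= gru_invalidate_range_start,
};

static struct mmu_notifier gru_mn = {
	.ops = &gru_mmuops,
};

/* Registered once per address space that maps a GRU context, e.g.
 * mmu_notifier_register(&gru_mn, current->mm); */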

This patch:

Contains the definitions of the hardware GRU data structures that are used
by the driver to manage the GRU.

[akpm@linux-foundation.org: export hpage_shift]
Signed-off-by: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-30 09:41:47 -07:00

/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);
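
/*
 * Hugetlb mappings live in a dedicated region (RGN_HPAGE).  The helpers
 * below first rebase the address with htlbpage_to_page(), which scales
 * the region offset down by HPAGE_SIZE/PAGE_SIZE so that the ordinary
 * pgd/pud/pmd walk can be reused for huge pages.
 */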
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}
	return pte;
}
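
/*
 * ia64 never shares huge-page pmds between processes, so there is
 * nothing to unshare here and the generic caller treats 0 as "not
 * shared".
 */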
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;
	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
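
/*
 * Huge pages on ia64 are tracked with normal-format ptes at rebased
 * addresses rather than with huge pmd/pud entries, so both predicates
 * report false and the generic code never takes the huge-pmd path.
 */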
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}
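
/*
 * Search the dedicated hugetlb region for an unmapped, HPAGE_SIZE-aligned
 * range of the requested length.
 */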
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
			size <= PAGE_SIZE ||
			size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init() with
	 * HPAGE_SHIFT_DEFAULT; override the region register here with the
	 * new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}

early_param("hugepagesz", hugetlb_setup_sz);
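
Usage note: with the early_param above, the huge page size is chosen on the
boot command line, e.g. "hugepagesz=256M" or "hugepagesz=1G" (memparse()
accepts the usual K/M/G suffixes).  A size is rejected unless it is a power
of two larger than PAGE_SIZE, below the MAX_ORDER allocation limit, and
present in the PAL-reported mask of supported TR page sizes.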