bf03c80699
Patch series "Simplify the page flags a little".

In the course of our folio conversions, we have made many page flags only used on folios, so we can now remove the page-based accessors.  This should cut down compile time a little, and prevent new users from cropping up.

There is more that could be done in this area, but it would produce merge conflicts, so I'll sit on those patches until next merge window.  We now have line of sight to removing PG_private_2 and PG_private.

This patch (of 10):

This flag is now only used on folios, so we can remove all the page accessors.

[akpm@linux-foundation.org: fix arch/powerpc/mm/pgtable-frag.c]
Link: https://lkml.kernel.org/r/20240821193445.2294269-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240821193445.2294269-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
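As context for the powerpc fixup noted above, the change in this file is the usual page-to-folio flag-accessor swap. A rough before/after sketch of pte_free_defer() follows; the "before" shape is an assumption based on the removed page-based helpers (SetPageActive()), while the "after" matches the code below:

/* Before (assumed): page-based flag accessor, removed by this series */
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = virt_to_page(pgtable);
	SetPageActive(page);
	pte_fragment_free((unsigned long *)pgtable, 0);
}

/* After: folio accessor, as in the file below */
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct folio *folio;

	folio = virt_to_folio(pgtable);
	folio_set_active(folio);
	pte_fragment_free((unsigned long *)pgtable, 0);
}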
146 lines · 3.2 KiB · C
// SPDX-License-Identifier: GPL-2.0

/*
 * Handling Page Tables through page fragments
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

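/*
 * Drop the pending references for the unused fragments of a partially
 * consumed PTE page and free it once pt_frag_refcount reaches zero.
 */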
void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct ptdesc *ptdesc;

	ptdesc = virt_to_ptdesc(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
		pagetable_pte_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

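/*
 * Hand out the next unused fragment from the per-context cache, or NULL
 * if no cached PTE page (or no free fragment) is available.
 */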
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	if (PTE_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = pte_frag_get(&mm->context);
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		pte_frag_set(&mm->context, pte_frag);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

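/*
 * Slow path: allocate a fresh page table page (user allocations are
 * accounted and get the PTE constructor), return its first fragment and,
 * if no fragment is currently cached, stash the remainder in mm->context.
 */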
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct ptdesc *ptdesc;

	if (!kernel) {
		ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0);
		if (!ptdesc)
			return NULL;
		if (!pagetable_pte_ctor(ptdesc)) {
			pagetable_free(ptdesc);
			return NULL;
		}
	} else {
		ptdesc = pagetable_alloc(PGALLOC_GFP, 0);
		if (!ptdesc)
			return NULL;
	}

	atomic_set(&ptdesc->pt_frag_refcount, 1);

	ret = ptdesc_address(ptdesc);
	/*
	 * if we support only one fragment just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find ptdesc_page set, we return
	 * the allocated page with single fragment
	 * count.
	 */
	if (likely(!pte_frag_get(&mm->context))) {
		atomic_set(&ptdesc->pt_frag_refcount, PTE_FRAG_NR);
		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

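/* Allocate a PTE fragment, trying the per-context cache before the slow path. */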
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

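/* RCU callback: run the PTE destructor and free the page table page. */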
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc;

	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

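/*
 * Drop one fragment reference; when the last one goes, free the page table
 * page, deferring the free to RCU if it was requested via pte_free_defer().
 */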
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	if (pagetable_is_reserved(ptdesc))
		return free_reserved_ptdesc(ptdesc);

	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
		if (kernel)
			pagetable_free(ptdesc);
		else if (folio_test_clear_active(ptdesc_folio(ptdesc)))
			call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
		else
			pte_free_now(&ptdesc->pt_rcu_head);
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
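/*
 * Mark the fragment's folio active so the final free goes through RCU,
 * then drop this caller's reference.
 */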
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct folio *folio;

	folio = virt_to_folio(pgtable);
	folio_set_active(folio);
	pte_fragment_free((unsigned long *)pgtable, 0);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */