1
linux/arch/powerpc/include/asm/kfence.h
Hari Bathini 353d7a84c2 powerpc/64s/radix/kfence: map __kfence_pool at page granularity
When KFENCE is enabled, total system memory is mapped at page level
granularity. But in radix MMU mode, ~3GB additional memory is needed
to map 100GB of system memory at page level granularity when compared
to using 2MB direct mapping. This is not desired considering KFENCE is
designed to be enabled in production kernels [1].

Mapping only the memory allocated for KFENCE pool at page granularity is
sufficient to enable KFENCE support. So, allocate __kfence_pool during
bootup and map it at page granularity instead of mapping all system
memory at page granularity.

Without patch:
  # cat /proc/meminfo
  MemTotal:       101201920 kB

With patch:
  # cat /proc/meminfo
  MemTotal:       104483904 kB

Note that enabling KFENCE at runtime is disabled for radix MMU for now,
as it depends on the ability to split page table mappings and such APIs
are not currently implemented for radix MMU.

All kfence_test.c testcases passed with this patch.

[1] https://lore.kernel.org/all/20201103175841.3495947-2-elver@google.com/

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240701130021.578240-1-hbathini@linux.ibm.com
2024-07-04 21:55:18 +10:00

58 lines
1.0 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
/*
* powerpc KFENCE support.
*
* Copyright (C) 2020 CS GROUP France
*/
#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H
#include <linux/mm.h>
#include <asm/pgtable.h>
#ifdef CONFIG_PPC64_ELF_ABI_V1
#define ARCH_FUNC_PREFIX "."
#endif
#ifdef CONFIG_KFENCE
extern bool kfence_disabled;
/*
 * Permanently mark KFENCE as unusable for this boot.
 *
 * Sets the file-scope kfence_disabled flag, which arch_kfence_init_pool()
 * consults to veto pool initialization (per the commit message: used when
 * the radix MMU cannot split page-table mappings at runtime).
 */
static inline void disable_kfence(void)
{
kfence_disabled = true;
}
/*
 * Arch hook consulted when the KFENCE pool is initialized.
 *
 * Returns false if disable_kfence() was called earlier, causing the KFENCE
 * core to abandon pool setup; returns true otherwise.
 */
static inline bool arch_kfence_init_pool(void)
{
return !kfence_disabled;
}
#endif
#ifdef CONFIG_PPC64
/*
 * Protect or unprotect a single KFENCE guard page (64-bit variant).
 *
 * Delegates to __kernel_map_pages(): the page backing @addr is unmapped
 * when @protect is true and remapped when it is false. Always reports
 * success to the KFENCE core.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);

	return true;
}
#else
/*
 * Protect or unprotect a single KFENCE guard page (32-bit variant).
 *
 * Toggles _PAGE_PRESENT directly in the kernel PTE for @addr. Unprotecting
 * only sets the bit; protecting also flushes the TLB for the page so a
 * subsequent access faults immediately. Always reports success.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *ptep = virt_to_kpte(addr);

	if (!protect) {
		/* Restore the present bit; no flush needed to re-enable access. */
		pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
		return true;
	}

	/* Clear the present bit and drop any stale TLB entry for this page. */
	pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return true;
}
#endif
#endif /* __ASM_POWERPC_KFENCE_H */