529ce23a76
The mm_struct contains a function pointer *get_unmapped_area(), which is set to either arch_get_unmapped_area() or arch_get_unmapped_area_topdown() during the initialization of the mm. Since the function pointer only ever points to two functions that are named the same across all arches, a function pointer is not really required. In addition future changes will want to add versions of the functions that take additional arguments. So to save a pointer's worth of bytes in mm_struct, and prevent adding additional function pointers to mm_struct in future changes, remove it and keep the information about which get_unmapped_area() to use in a flag. Add the new flag to MMF_INIT_MASK so it doesn't get clobbered on fork by mmf_init_flags(). Most MM flags get clobbered on fork. In the pre-existing behavior mm->get_unmapped_area() would get copied to the new mm in dup_mm(), so not clobbering the flag preserves the existing behavior around inheriting the topdown-ness. Introduce a helper, mm_get_unmapped_area(), to easily convert code that refers to the old function pointer to instead select and call either arch_get_unmapped_area() or arch_get_unmapped_area_topdown() based on the flag. Then drop the mm->get_unmapped_area() function pointer. Leave the get_unmapped_area() pointer in struct file_operations alone. The main purpose of this change is to reorganize in preparation for future changes, but it also converts the calls of mm->get_unmapped_area() from indirect branches into direct ones. The stress-ng bigheap benchmark calls realloc a lot, which calls through get_unmapped_area() in the kernel. On x86, the change yielded a ~1% improvement there on a retpoline config. In testing a few x86 configs, removing the pointer unfortunately didn't result in any actual size reductions in the compiled layout of mm_struct. But depending on compiler or arch alignment requirements, the change could shrink the size of mm_struct. 
Link: https://lkml.kernel.org/r/20240326021656.202649-3-rick.p.edgecombe@intel.com Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com> Acked-by: Dave Hansen <dave.hansen@linux.intel.com> Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com> Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org> Cc: Borislav Petkov (AMD) <bp@alien8.de> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Deepak Gupta <debug@rivosinc.com> Cc: Guo Ren <guoren@kernel.org> Cc: Helge Deller <deller@gmx.de> Cc: H. Peter Anvin (Intel) <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> Cc: Kees Cook <keescook@chromium.org> Cc: Mark Brown <broonie@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
181 lines
4.0 KiB
C
181 lines
4.0 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Copyright(c) 2016-20 Intel Corporation. */
|
|
|
|
#include <linux/acpi.h>
|
|
#include <linux/miscdevice.h>
|
|
#include <linux/mman.h>
|
|
#include <linux/security.h>
|
|
#include <linux/suspend.h>
|
|
#include <asm/traps.h>
|
|
#include "driver.h"
|
|
#include "encl.h"
|
|
|
|
/* Reserved enclave attribute bits; populated from CPUID in sgx_drv_init(). */
u64 sgx_attributes_reserved_mask;

/*
 * Reserved XFRM bits; the default permits only the low two bits until
 * sgx_drv_init() refines the mask from CPUID (when OSXSAVE is enabled).
 */
u64 sgx_xfrm_reserved_mask = ~0x3;

/* Reserved MISCSELECT bits; populated from CPUID in sgx_drv_init(). */
u32 sgx_misc_reserved_mask;
|
|
|
|
static int sgx_open(struct inode *inode, struct file *file)
|
|
{
|
|
struct sgx_encl *encl;
|
|
int ret;
|
|
|
|
encl = kzalloc(sizeof(*encl), GFP_KERNEL);
|
|
if (!encl)
|
|
return -ENOMEM;
|
|
|
|
kref_init(&encl->refcount);
|
|
xa_init(&encl->page_array);
|
|
mutex_init(&encl->lock);
|
|
INIT_LIST_HEAD(&encl->va_pages);
|
|
INIT_LIST_HEAD(&encl->mm_list);
|
|
spin_lock_init(&encl->mm_lock);
|
|
|
|
ret = init_srcu_struct(&encl->srcu);
|
|
if (ret) {
|
|
kfree(encl);
|
|
return ret;
|
|
}
|
|
|
|
file->private_data = encl;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Release handler for /dev/sgx_enclave: detach every address space
 * still tracked on the enclave's mm_list, then drop the file's
 * reference on the enclave.
 */
static int sgx_release(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl = file->private_data;
	struct sgx_encl_mm *encl_mm;

	/*
	 * Drain the remaining mm_list entries. At this point the list contains
	 * entries for processes, which have closed the enclave file but have
	 * not exited yet. The processes, which have exited, are gone from the
	 * list by sgx_mmu_notifier_release().
	 */
	for ( ; ; ) {
		spin_lock(&encl->mm_lock);

		if (list_empty(&encl->mm_list)) {
			encl_mm = NULL;
		} else {
			/* Detach one entry at a time under the lock. */
			encl_mm = list_first_entry(&encl->mm_list,
						   struct sgx_encl_mm, list);
			list_del_rcu(&encl_mm->list);
		}

		spin_unlock(&encl->mm_lock);

		/* The enclave is no longer mapped by any mm. */
		if (!encl_mm)
			break;

		/*
		 * Wait for in-flight SRCU readers of mm_list to finish
		 * before the notifier is unregistered and the entry freed.
		 */
		synchronize_srcu(&encl->srcu);
		mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
		kfree(encl_mm);

		/* 'encl_mm' is gone, put encl_mm->encl reference: */
		kref_put(&encl->refcount, sgx_encl_release);
	}

	/* Drop the reference the file itself held on the enclave. */
	kref_put(&encl->refcount, sgx_encl_release);
	return 0;
}
|
|
|
|
static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
|
|
{
|
|
struct sgx_encl *encl = file->private_data;
|
|
int ret;
|
|
|
|
ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
|
|
if (ret)
|
|
return ret;
|
|
|
|
ret = sgx_encl_mm_add(encl, vma->vm_mm);
|
|
if (ret)
|
|
return ret;
|
|
|
|
vma->vm_ops = &sgx_vm_ops;
|
|
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
|
|
vma->vm_private_data = encl;
|
|
|
|
return 0;
|
|
}
|
|
|
|
static unsigned long sgx_get_unmapped_area(struct file *file,
|
|
unsigned long addr,
|
|
unsigned long len,
|
|
unsigned long pgoff,
|
|
unsigned long flags)
|
|
{
|
|
if ((flags & MAP_TYPE) == MAP_PRIVATE)
|
|
return -EINVAL;
|
|
|
|
if (flags & MAP_FIXED)
|
|
return addr;
|
|
|
|
return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
|
|
}
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
|
|
unsigned long arg)
|
|
{
|
|
return sgx_ioctl(filep, cmd, arg);
|
|
}
|
|
#endif
|
|
|
|
/* File operations for /dev/sgx_enclave. */
static const struct file_operations sgx_encl_fops = {
	.owner = THIS_MODULE,
	.open = sgx_open,
	.release = sgx_release,
	.unlocked_ioctl = sgx_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sgx_compat_ioctl,
#endif
	.mmap = sgx_mmap,
	.get_unmapped_area = sgx_get_unmapped_area,
};
|
|
|
|
/* Misc character device backing /dev/sgx_enclave. */
static struct miscdevice sgx_dev_enclave = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_enclave",
	.nodename = "sgx_enclave",
	.fops = &sgx_encl_fops,
};
|
|
|
|
/*
 * Initialize the SGX enclave driver: verify hardware support, derive
 * the reserved-bit masks from CPUID, and register /dev/sgx_enclave.
 *
 * Returns 0 on success, -ENODEV when SGX cannot be used, or the error
 * from misc_register().
 */
int __init sgx_drv_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	u64 attr_mask, xfrm_mask;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
		return -ENODEV;

	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);

	/* Bit 0 of sub-leaf 0 EAX advertises SGX1 instruction support. */
	if (!(eax & 1)) {
		pr_err("SGX disabled: SGX1 instruction support not available.\n");
		return -ENODEV;
	}

	sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;

	cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);

	/* EBX:EAX hold the supported attribute bits; invert for reserved. */
	attr_mask = ((u64)ebx << 32) | eax;
	sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;

	if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
		/* EDX:ECX hold the supported XFRM bits; invert for reserved. */
		xfrm_mask = ((u64)edx << 32) | ecx;
		sgx_xfrm_reserved_mask = ~xfrm_mask;
	}

	return misc_register(&sgx_dev_enclave);
}
|