// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */

#define FLT_IFETCH	-1
#define FLT_LOAD	0
#define FLT_STORE	1
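
/*
 * These cause codes are supplied by the protection-fault entry points at
 * the bottom of this file and select which VMA permission bit (VM_EXEC,
 * VM_READ or VM_WRITE) do_page_fault() checks below.
 */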

/*
 * Canonical page fault handler
 */

static void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		goto bad_area_nosemaphore;

	/* Address space is OK. Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}

		mmap_read_unlock(mm);
		return;
	}

	mmap_read_unlock(mm);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	mmap_read_unlock(mm);

|
|
|
bad_area_nosemaphore:
|
2011-10-31 16:54:08 -07:00
|
|
|
if (user_mode(regs)) {
|
2019-05-23 09:04:24 -07:00
|
|
|
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
|
2011-10-31 16:54:08 -07:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* Kernel-mode fault falls through */
|
|
|
|
|
|
|
|
no_context:
|
|
|
|
fixup = search_exception_tables(pt_elr(regs));
|
|
|
|
if (fixup) {
|
|
|
|
pt_set_elr(regs, fixup->fixup);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Things are looking very, very bad now */
|
|
|
|
bust_spinlocks(1);
|
|
|
|
printk(KERN_EMERG "Unable to handle kernel paging request at "
|
|
|
|
"virtual address 0x%08lx, regs %p\n", address, regs);
|
|
|
|
die("Bad Kernel VA", regs, SIGKILL);
|
|
|
|
}

void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}
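
/*
 * Illustrative sketch only, not part of this file's interface: the header
 * comment notes that a native port emulating the HVM exceptions can also
 * call into this fault path.  One way such a port might route its
 * protection faults through the entry points above is sketched below.
 * The dispatcher name and cause values are hypothetical assumptions made
 * purely for illustration.
 */
#if 0
enum example_prot_cause {
	EXAMPLE_CAUSE_RPROT,	/* hypothetical read-protection cause */
	EXAMPLE_CAUSE_WPROT,	/* hypothetical write-protection cause */
	EXAMPLE_CAUSE_XPROT,	/* hypothetical execute-protection cause */
};

/* Hypothetical low-level trap hook for a native port */
static void example_protection_trap(struct pt_regs *regs,
				    enum example_prot_cause cause)
{
	switch (cause) {
	case EXAMPLE_CAUSE_RPROT:
		read_protection_fault(regs);	/* handled as FLT_LOAD */
		break;
	case EXAMPLE_CAUSE_WPROT:
		write_protection_fault(regs);	/* handled as FLT_STORE */
		break;
	case EXAMPLE_CAUSE_XPROT:
		execute_protection_fault(regs);	/* handled as FLT_IFETCH */
		break;
	}
}
#endif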