I observed that for each shared file-backed page fault, we're very likely to retry one more time for the first write fault upon no page. It's because we'll need to release the mmap lock for dirty rate limit purposes with balance_dirty_pages_ratelimited() (in fault_dirty_shared_page()), and after that throttling we return VM_FAULT_RETRY.

We did that probably because VM_FAULT_RETRY is the only way we can return to the fault handler at that time telling it we've released the mmap lock. However that's not ideal, because it's very likely the fault does not need to be retried at all: the pgtable was installed before the throttling, so the follow-up fault (taking the mmap read lock again, walking the pgtable, etc.) is in most cases unnecessary. This not only slows down shared file-backed page faults, it also adds mmap lock contention which is in most cases not needed at all.

To observe this, one can write to some shmem page and look at the "pgfault" value in /proc/vmstat: we should see 2 counts for each shmem write simply because we retried, and the "pgfault" vm event captures that.

To make it more efficient, add a new VM_FAULT_COMPLETED return code just to show that we've completed the whole fault and released the lock. It's also a hint that we should very possibly not need another fault immediately on this page, because we've just completed it.

This patch provides a ~12% perf boost on my aarch64 test VM with a simple program sequentially dirtying a 400MB shmem file being mmap()ed; here are the timings:

  Before: 650.980 ms (+-1.94%)
  After:  569.396 ms (+-1.38%)

I believe it could help more than that.

We need some special care on GUP and the s390 pgfault handler (for the gmap code run before returning from pgfault); the rest of the changes in the page fault handlers should be relatively straightforward. Another thing to mention is that mm_account_fault() does take this new fault as a generic fault to be accounted, unlike VM_FAULT_RETRY.

I explicitly didn't touch hmm_vma_fault() and break_ksm() because they do not handle VM_FAULT_RETRY even with the existing code, so I'm literally keeping them as-is.

Link: https://lkml.kernel.org/r/20220530183450.42886-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vineet Gupta <vgupta@kernel.org>
Acked-by: Guo Ren <guoren@kernel.org>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Acked-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk> [arm part]
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Stafford Horne <shorne@gmail.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Richard Weinberger <richard@nod.at>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Will Deacon <will@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Chris Zankel <chris@zankel.net>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Rich Felker <dalias@libc.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Helge Deller <deller@gmx.de>
Cc: Yoshinori Sato <ysato@users.osdn.me>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
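For reference, here is a minimal userspace sketch (illustrative, not part of the patch) that reproduces the observation from the changelog: it dirties a 400MB shmem-backed mapping page by page and prints the system-wide "pgfault" delta from /proc/vmstat. It assumes 4KiB pages; since the counter is system-wide, run it on an otherwise idle machine. On a pre-patch kernel it should report roughly two faults per page, and roughly one with the patch applied.

#include <stdio.h>
#include <sys/mman.h>

static unsigned long read_pgfault(void)
{
	unsigned long val = 0;
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "pgfault %lu", &val) == 1)
			break;
	fclose(f);
	return val;
}

int main(void)
{
	const size_t len = 400UL << 20;	/* 400MB, matching the test above */
	unsigned long before, after;
	size_t off;
	char *buf;

	/* MAP_SHARED|MAP_ANONYMOUS mappings are shmem-backed */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	before = read_pgfault();
	for (off = 0; off < len; off += 4096)
		buf[off] = 1;		/* first write fault on each page */
	after = read_pgfault();

	printf("pgfault delta: %lu for %zu pages dirtied\n",
	       after - before, len / 4096);
	return 0;
}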
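Below is arch/microblaze/mm/fault.c as it reads with this patch applied. As in the other converted page fault handlers, the functional change is the VM_FAULT_COMPLETED check added right after handle_mm_fault().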
/*
 * arch/microblaze/mm/fault.c
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from "arch/ppc/mm/fault.c"
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>

static unsigned long pte_misses;	/* updated by do_page_fault() */
static unsigned long pte_errors;	/* updated by do_page_fault() */

/*
 * Check whether the instruction at regs->pc is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int __user *)regs->pc))
		return 0;
	/* check for 1 in the rD field */
	if (((inst >> 21) & 0x1f) != 1)
		return 0;
	/* check for store opcodes */
	if ((inst & 0xd0000000) == 0xd0000000)
		return 1;
	return 0;
}


/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from do_page_fault below and from some of the procedures
 * in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *fixup;
	/* MS: no context */
	/* Are we prepared to handle this fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

	/* kernel has accessed a bad area */
	die("kernel access of bad area", regs, sig);
}

/*
 * The error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int code = SEGV_MAPERR;
	int is_write = error_code & ESR_S;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	regs->ear = address;
	regs->esr = error_code;

	/* On a kernel SLB miss we can only check for a valid exception entry */
	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
		pr_warn("kernel task_size exceed");
		_exception(SIGSEGV, regs, code, address);
	}

	/* for instr TLB miss and instr storage exception ESR_S is undefined */
	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
		is_write = 0;

	if (unlikely(faulthandler_disabled() || !mm)) {
		if (kernel_mode(regs))
			goto bad_area_nosemaphore;

		/* faulthandler_disabled() in user mode is really bad,
		   as is current->mm == NULL. */
		pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
			 mm);
		pr_emerg("r15 = %lx MSR = %lx\n",
			 regs->r15, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
			goto bad_area_nosemaphore;

retry:
		mmap_read_lock(mm);
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(!is_write))
		goto bad_area;

	/*
	 * N.B. The ABI allows programs to access up to
	 * a few hundred bytes below the stack pointer (TBD).
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1. Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (unlikely(address + 0x100000 < vma->vm_end)) {

		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->r1
			&& (kernel_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;

	/* a write */
	if (unlikely(is_write)) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	/* a read */
	} else {
		/* protection fault */
		if (unlikely(error_code & 0x08000000))
			goto bad_area;
		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;
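
	/*
	 * When the core mm must drop the mmap lock mid-fault (e.g. to
	 * throttle in balance_dirty_pages_ratelimited() on a shared
	 * file-backed write), it now finishes the fault and returns
	 * VM_FAULT_COMPLETED instead of VM_FAULT_RETRY, so there is
	 * nothing left to unlock and no reason to retry.
	 */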
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);

	/*
	 * keep track of tlb+htab misses that are good addrs but
	 * just need pte's created via handle_mm_fault()
	 * -- Cort
	 */
	pte_misses++;
	return;

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	pte_errors++;

	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		return;
	}

	bad_page_fault(regs, address, SIGSEGV);
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}