/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>		/* For VMALLOC_END */
#include <linux/kdebug.h>

int show_unhandled_signals = 1;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
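	/* Width, in hex digits, of an address when printed below. */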
	const int field = sizeof(unsigned long) * 2;
	int si_code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
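	/*
	 * On 64-bit kernels the TLB refill handler walks the kernel page
	 * tables for kernel-segment addresses itself, so a vmalloc-range
	 * fault that still reaches this handler is treated as a hard
	 * kernel fault (no_context) rather than a lazy page-table sync.
	 */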
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
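/*
 * When modules are loaded into their own mapped region (MODULE_START..
 * MODULE_END), a fault there is resolved the same way as a vmalloc fault.
 */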
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
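	/*
	 * The fault may be retried: on VM_FAULT_RETRY the core fault path
	 * has already dropped the mmap lock, so we loop back here to
	 * retake it with FAULT_FLAG_TRIED set.
	 */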
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
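	/*
	 * find_vma() returns the first VMA that ends above the address; it
	 * actually contains the address only if vm_start <= address.
	 * Otherwise the access can still be valid if a stack VMA may grow
	 * down to cover it.
	 */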
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
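		/*
		 * Read Inhibit (RI) / Execute Inhibit (XI): a fault at the
		 * exception PC is an instruction fetch and so needs VM_EXEC;
		 * any other read fault needs VM_READ.
		 */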
		if (cpu_has_rixi) {
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ) &&
			    exception_epc(regs) != address) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (unlikely(!vma_is_accessible(vma)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, NULL);
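	/*
	 * If a fatal signal arrived while the fault was being handled, the
	 * core fault path has already released the mmap lock; just return
	 * and let the signal be delivered.
	 */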
	if (fault_signal_pending(fault, regs))
		return;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
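	/*
	 * Fault accounting: credit a major or minor fault to the task and
	 * emit the matching perf software event.
	 */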
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
			tsk->maj_flt++;
		} else {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
			tsk->min_flt++;
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
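		/*
		 * The diagnostic below is rate limited (at most 10 messages
		 * every 5 seconds, per DEFINE_RATELIMIT_STATE above) so a
		 * task faulting in a loop cannot flood the log.
		 */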
		if (show_unhandled_signals &&
		    unhandled_signal(tsk, SIGSEGV) &&
		    __ratelimit(&ratelimit_state)) {
			pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
				tsk->comm,
				write ? "write access to" : "read access from",
				field, address);
			pr_info("epc = %0*lx in", field,
				(unsigned long) regs->cp0_epc);
			print_vma_addr(KERN_CONT " ", regs->cp0_epc);
			pr_cont("\n");
			pr_info("ra  = %0*lx in", field,
				(unsigned long) regs->regs[31]);
			print_vma_addr(KERN_CONT " ", regs->regs[31]);
			pr_cont("\n");
		}
		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field, regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
	printk("do_page_fault() #3: sending SIGBUS to %s for "
	       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
	       tsk->comm,
	       write ? "write access to" : "read access from",
	       field, address,
	       field, (unsigned long) regs->cp0_epc,
	       field, (unsigned long) regs->regs[31]);
#endif
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	tsk->thread.cp0_badvaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);

	return;

#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

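		/*
		 * pgd_current[] holds the per-CPU top-level page table
		 * pointer also used by the TLB refill handler; copy the
		 * missing kernel entries into it from init_mm's master
		 * table, level by level.
		 */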
		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}

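/*
 * Exception-vector entry point: exception_enter()/exception_exit() tell
 * the context-tracking code (used by NO_HZ_FULL) that the CPU has left
 * user mode before the fault is handled.
 */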
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	unsigned long write, unsigned long address)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	__do_page_fault(regs, write, address);
	exception_exit(prev_state);
}