2005-04-16 15:20:36 -07:00
/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
2016-07-23 14:01:45 -04:00
# include <linux/extable.h>
2005-04-16 15:20:36 -07:00
# include <linux/signal.h>
# include <linux/mm.h>
2008-12-14 18:01:44 +00:00
# include <linux/hardirq.h>
2005-04-16 15:20:36 -07:00
# include <linux/init.h>
2007-12-03 15:21:57 -05:00
# include <linux/kprobes.h>
2008-09-06 11:35:55 +01:00
# include <linux/uaccess.h>
2008-09-11 11:52:02 -04:00
# include <linux/page-flags.h>
2009-07-24 12:37:09 +01:00
# include <linux/sched.h>
2009-08-17 20:02:06 +01:00
# include <linux/highmem.h>
2010-02-02 20:24:58 +01:00
# include <linux/perf_event.h>
2005-04-16 15:20:36 -07:00
2011-10-08 11:20:42 +01:00
# include <asm/exception.h>
2005-04-16 15:20:36 -07:00
# include <asm/pgtable.h>
2012-03-28 18:30:01 +01:00
# include <asm/system_misc.h>
# include <asm/system_info.h>
2005-04-16 15:20:36 -07:00
# include <asm/tlbflush.h>
# include "fault.h"
2009-07-24 12:34:55 +01:00
# ifdef CONFIG_MMU
2007-12-03 15:21:57 -05:00
#ifdef CONFIG_KPROBES
/*
 * Give kprobes a chance to claim the fault before the normal page
 * fault path runs.  Returns non-zero when a kprobe handled it.
 */
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int handled = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			handled = 1;
		preempt_enable();
	}

	return handled;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif
2005-04-16 15:20:36 -07:00
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 *
 * Walks pgd -> pud -> pmd -> pte for the given address and prints each
 * level's raw entry to the kernel log, stopping at the first missing or
 * bad entry.  Called from the fault/oops paths, so it must not sleep
 * beyond what pte_offset_map() requires.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	/* No mm (e.g. kernel thread): fall back to the kernel page tables. */
	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	/* do { } while (0) so each failed level can simply 'break' out. */
	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		/* Only print the pud when it is a real (folded-away) level. */
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		/*
		 * With the classic 2-level tables the hardware PTEs live
		 * PTE_HWTABLE_PTRS entries after the Linux ones; print the
		 * hardware copy as well.
		 */
		pr_cont(", *ppte=%08llx",
				(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while(0);

	pr_cont("\n");
}
2009-07-24 12:34:55 +01:00
#else					/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	/* Without an MMU there are no page tables to dump. */
}
#endif					/* CONFIG_MMU */
2005-04-16 15:20:36 -07:00
/*
 * Oops. The kernel tried to access some page that wasn't present.
 *
 * First give the exception-table fixup a chance (a __get_user/__put_user
 * style access that is allowed to fault); if there is no fixup entry,
 * dump state and kill the task.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	/* die() may return; make sure the offending task never does. */
	do_exit(SIGKILL);
}
/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 *
 * Records the fault details in the thread struct (trap_no 14 == data
 * abort) and delivers the requested signal with a filled-in siginfo.
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	/* Optional debug spew, gated per signal type by user_debug=. */
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}
2006-09-27 16:13:48 +01:00
void do_bad_area ( unsigned long addr , unsigned int fsr , struct pt_regs * regs )
2005-04-16 15:20:36 -07:00
{
2006-09-27 16:13:48 +01:00
struct task_struct * tsk = current ;
struct mm_struct * mm = tsk - > active_mm ;
2005-04-16 15:20:36 -07:00
/*
* If we are in kernel mode at this point , we
* have no context to handle this fault with .
*/
if ( user_mode ( regs ) )
2005-04-16 15:23:55 -07:00
__do_user_fault ( tsk , addr , fsr , SIGSEGV , SEGV_MAPERR , regs ) ;
2005-04-16 15:20:36 -07:00
else
__do_kernel_fault ( mm , addr , fsr , regs ) ;
}
2009-07-24 12:34:55 +01:00
# ifdef CONFIG_MMU
2007-07-20 09:21:06 +02:00
# define VM_FAULT_BADMAP 0x010000
# define VM_FAULT_BADACCESS 0x020000
2005-04-16 15:20:36 -07:00
2009-09-20 12:53:01 +01:00
/*
* Check that the permissions on the VMA allow for the fault which occurred .
* If we encountered a write fault , we must have write permission , otherwise
* we allow any permission .
*/
static inline bool access_error ( unsigned int fsr , struct vm_area_struct * vma )
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC ;
if ( fsr & FSR_WRITE )
mask = VM_WRITE ;
2009-09-20 13:18:47 +01:00
if ( fsr & FSR_LNX_PF )
mask = VM_EXEC ;
2009-09-20 12:53:01 +01:00
return vma - > vm_flags & mask ? false : true ;
}
/*
 * Core VMA lookup and fault servicing.  Returns a VM_FAULT_* code from
 * handle_mm_fault(), or the pseudo codes VM_FAULT_BADMAP (no VMA covers
 * the address) / VM_FAULT_BADACCESS (VMA permissions do not allow this
 * access).  Caller must hold mm->mmap_sem for read.
 */
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	/* VMA starts above addr: maybe a stack that can grow down. */
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(vma, addr & PAGE_MASK, flags);

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
2007-12-03 15:27:56 -05:00
static int __kprobes
2005-04-16 15:20:36 -07:00
do_page_fault ( unsigned long addr , unsigned int fsr , struct pt_regs * regs )
{
struct task_struct * tsk ;
struct mm_struct * mm ;
2005-04-16 15:23:55 -07:00
int fault , sig , code ;
2013-09-12 15:13:39 -07:00
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE ;
2005-04-16 15:20:36 -07:00
2007-12-03 15:21:57 -05:00
if ( notify_page_fault ( regs , fsr ) )
return 0 ;
2005-04-16 15:20:36 -07:00
tsk = current ;
mm = tsk - > mm ;
2011-06-25 11:44:06 +01:00
/* Enable interrupts if they were enabled in the parent context. */
if ( interrupts_enabled ( regs ) )
local_irq_enable ( ) ;
2005-04-16 15:20:36 -07:00
/*
* If we ' re in an interrupt or have no user
* context , we must not take the fault . .
*/
2015-05-11 17:52:11 +02:00
if ( faulthandler_disabled ( ) | | ! mm )
2005-04-16 15:20:36 -07:00
goto no_context ;
2013-09-12 15:13:39 -07:00
if ( user_mode ( regs ) )
flags | = FAULT_FLAG_USER ;
if ( fsr & FSR_WRITE )
flags | = FAULT_FLAG_WRITE ;
2005-09-20 17:52:13 +01:00
/*
* As per x86 , we may deadlock here . However , since the kernel only
* validly references user space from well defined areas of the code ,
* we can bug out early if this is from code which shouldn ' t .
*/
if ( ! down_read_trylock ( & mm - > mmap_sem ) ) {
if ( ! user_mode ( regs ) & & ! search_exception_tables ( regs - > ARM_pc ) )
goto no_context ;
2011-11-27 17:49:50 +01:00
retry :
2005-09-20 17:52:13 +01:00
down_read ( & mm - > mmap_sem ) ;
2009-09-20 12:52:19 +01:00
} else {
/*
* The above down_read_trylock ( ) might have succeeded in
* which case , we ' ll have missed the might_sleep ( ) from
* down_read ( )
*/
might_sleep ( ) ;
2009-10-05 13:40:44 +01:00
# ifdef CONFIG_DEBUG_VM
if ( ! user_mode ( regs ) & &
! search_exception_tables ( regs - > ARM_pc ) )
goto no_context ;
# endif
2005-09-20 17:52:13 +01:00
}
2011-11-27 17:49:50 +01:00
fault = __do_page_fault ( mm , addr , fsr , flags , tsk ) ;
/* If we need to retry but a fatal signal is pending, handle the
* signal first . We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm / filemap . c . */
if ( ( fault & VM_FAULT_RETRY ) & & fatal_signal_pending ( current ) )
return 0 ;
/*
* Major / minor page fault accounting is only done on the
* initial attempt . If we go through a retry , it is extremely
* likely that the page will be found in page cache at that point .
*/
2005-04-16 15:20:36 -07:00
2011-06-27 14:41:57 +02:00
perf_sw_event ( PERF_COUNT_SW_PAGE_FAULTS , 1 , regs , addr ) ;
2012-04-02 18:19:49 +01:00
if ( ! ( fault & VM_FAULT_ERROR ) & & flags & FAULT_FLAG_ALLOW_RETRY ) {
2011-11-27 17:49:50 +01:00
if ( fault & VM_FAULT_MAJOR ) {
tsk - > maj_flt + + ;
perf_sw_event ( PERF_COUNT_SW_PAGE_FAULTS_MAJ , 1 ,
regs , addr ) ;
} else {
tsk - > min_flt + + ;
perf_sw_event ( PERF_COUNT_SW_PAGE_FAULTS_MIN , 1 ,
regs , addr ) ;
}
if ( fault & VM_FAULT_RETRY ) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation . */
flags & = ~ FAULT_FLAG_ALLOW_RETRY ;
2012-10-08 16:32:19 -07:00
flags | = FAULT_FLAG_TRIED ;
2011-11-27 17:49:50 +01:00
goto retry ;
}
}
up_read ( & mm - > mmap_sem ) ;
2010-02-02 20:24:58 +01:00
2005-04-16 15:20:36 -07:00
/*
2016-03-17 14:19:55 -07:00
* Handle the " normal " case first - VM_FAULT_MAJOR
2005-04-16 15:20:36 -07:00
*/
2007-07-20 09:21:06 +02:00
if ( likely ( ! ( fault & ( VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS ) ) ) )
2005-04-16 15:20:36 -07:00
return 0 ;
2013-09-12 15:13:38 -07:00
/*
* If we are in kernel mode at this point , we
* have no context to handle this fault with .
*/
if ( ! user_mode ( regs ) )
goto no_context ;
2009-09-20 12:47:40 +01:00
if ( fault & VM_FAULT_OOM ) {
/*
* We ran out of memory , call the OOM killer , and return to
* userspace ( which will retry the fault , or kill us if we
* got oom - killed )
*/
pagefault_out_of_memory ( ) ;
return 0 ;
}
2007-07-19 01:47:05 -07:00
if ( fault & VM_FAULT_SIGBUS ) {
2005-04-16 15:23:55 -07:00
/*
* We had some memory , but were unable to
* successfully fix up this page fault .
*/
sig = SIGBUS ;
code = BUS_ADRERR ;
2007-07-19 01:47:05 -07:00
} else {
2005-04-16 15:23:55 -07:00
/*
* Something tried to access memory that
* isn ' t in our memory map . .
*/
sig = SIGSEGV ;
code = fault = = VM_FAULT_BADACCESS ?
SEGV_ACCERR : SEGV_MAPERR ;
2005-04-16 15:20:36 -07:00
}
2005-04-16 15:23:55 -07:00
__do_user_fault ( tsk , addr , fsr , sig , code , regs ) ;
return 0 ;
2005-04-16 15:20:36 -07:00
no_context :
__do_kernel_fault ( mm , addr , fsr , regs ) ;
return 0 ;
}
2009-07-24 12:34:55 +01:00
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	/* No MMU means no demand paging: nothing to service here. */
	return 0;
}
#endif					/* CONFIG_MMU */
2005-04-16 15:20:36 -07:00
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contains the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
2009-07-24 12:34:55 +01:00
# ifdef CONFIG_MMU
2007-12-03 15:27:56 -05:00
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	/* User addresses go through the normal page fault path. */
	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	/* Kernel-space address faulted from user mode: plain bad access. */
	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	/* Compare the active (hardware) pgd with the master kernel pgd. */
	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create inidividual L1 entries, so here we have to call
	 * pmd_none() check for the entry really corresponded to address, not
	 * for the first of pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	/* Master table has the mapping: copy it into this task's tables. */
	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
2009-07-24 12:34:55 +01:00
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	/* No MMU: translation faults cannot be repaired from a master pgd. */
	return 0;
}
#endif					/* CONFIG_MMU */
2005-04-16 15:20:36 -07:00
/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	/* Treat exactly like any other access to an unmapped area. */
	do_bad_area(addr, fsr, regs);
	return 0;
}
#endif /* CONFIG_ARM_LPAE */
2005-04-16 15:20:36 -07:00
/*
 * This abort handler always returns "fault".
 * Used as the catch-all entry for fault statuses we cannot recover.
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}
2011-11-22 17:30:28 +00:00
/*
 * One dispatch-table entry per fault status value: the handler to try,
 * plus the signal number, si_code and human-readable name reported when
 * the handler cannot recover.
 */
struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};
2011-11-22 17:30:28 +00:00
/* FSR definition */
2011-11-22 17:30:31 +00:00
# ifdef CONFIG_ARM_LPAE
# include "fsr-3level.c"
# else
2011-11-22 17:30:28 +00:00
# include "fsr-2level.c"
2011-11-22 17:30:31 +00:00
# endif
2011-11-22 17:30:28 +00:00
2005-04-16 15:20:36 -07:00
/*
 * Install a custom handler (plus signal/si_code/name used on failure)
 * for data abort fault status 'nr'.  BUGs on an out-of-range index.
 */
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	struct fsr_info *entry;

	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	entry = &fsr_info[nr];
	entry->fn   = fn;
	entry->sig  = sig;
	entry->code = code;
	entry->name = name;
}
/*
 * Dispatch a data abort to the relevant handler.
 *
 * Looks up the handler by fault status; if it cannot recover, logs the
 * fault, dumps the page tables and notifies the die chain so the fault
 * is reported (and the task signalled).
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

	/* FSR_LNX_PF is a Linux-internal flag, never a hardware status bit. */
	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);
	show_pte(current->mm, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}
2010-09-03 10:39:59 +01:00
/*
 * Install a custom handler (plus signal/si_code/name used on failure)
 * for instruction fault status 'nr'.  BUGs on an out-of-range index.
 */
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	struct fsr_info *entry;

	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	entry = &ifsr_info[nr];
	entry->fn   = fn;
	entry->sig  = sig;
	entry->code = code;
	entry->name = name;
}
2007-03-02 15:01:36 +00:00
/*
 * Dispatch a prefetch abort to the relevant handler, mirroring
 * do_DataAbort() but indexing the instruction-fault table and tagging
 * the status with FSR_LNX_PF so handlers know this is an exec fault.
 */
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}
2015-10-19 13:38:09 +01:00
/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	/* Returning 0 == handled: swallow the stale abort and continue. */
	return 0;
}
/*
 * Unmask asynchronous aborts for the first time on the boot CPU.
 * Temporarily points the async-external-abort slot at the forgiving
 * early handler so a stale abort left by firmware does not kill us,
 * then restores the default do_bad() handler.
 */
void __init early_abt_enable(void)
{
	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
	local_abt_enable();
	fsr_info[FSR_FS_AEA].fn = do_bad;
}
2011-11-22 17:30:31 +00:00
#ifndef CONFIG_ARM_LPAE
/*
 * Register architecture-version-dependent fault handlers at boot:
 * ARMv6+ gained I-cache maintenance faults, and ARMv7 section access
 * flag faults (which we currently treat as plain SEGV).
 */
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif