/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "fault.h"

/*
 * Fault status register encodings.  We steal bit 31 for our own purposes.
 */
#define FSR_LNX_PF		(1 << 31)
#define FSR_WRITE		(1 << 11)
#define FSR_FS4			(1 << 10)
#define FSR_FS3_0		(15)
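
/*
 * fsr_fs() folds FS[4] (bit 10 of the fault status register) down next to
 * FS[3:0], producing the 5-bit fault status value that indexes the
 * fsr_info[] and ifsr_info[] tables below.
 */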
static inline int fsr_fs(unsigned int fsr)
{
	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%08lx", pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08llx", (long long)pte_val(*pte));
		printk(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}

#else					/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SEGV) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000
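/*
 * Fault codes local to this file, returned by __do_page_fault() alongside
 * the generic VM_FAULT_* codes: BADMAP means no VMA covers the faulting
 * address, BADACCESS means the VMA permissions do not allow the access.
 */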

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}
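
/*
 * Resolve a fault against 'mm': find the VMA covering 'addr' (growing the
 * stack if necessary), check permissions with access_error(), and let
 * handle_mm_fault() do the real work.  Returns a VM_FAULT_* code, possibly
 * including VM_FAULT_BADMAP or VM_FAULT_BADACCESS.
 */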
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the fault.
	 */
	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR))
		return fault;
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	return fault;

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
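
/*
 * Top-level page fault handler for data aborts: takes mm->mmap_sem
 * (carefully, to avoid deadlocking on kernel-mode faults), calls
 * __do_page_fault(), and converts any failure into either a user signal
 * or a kernel oops.
 */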
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm  = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * down_read()
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, tsk);
	up_read(&mm->mmap_sem);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
	if (fault & VM_FAULT_MAJOR)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
	else if (fault & VM_FAULT_MINOR)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}

#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 */
	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h).  We normally guarantee that we always
	 * fill both L1 entries.  But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to check with
	 * pmd_none() the entry that really corresponds to the address, not
	 * just the first entry of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}

#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}
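
/*
 * Data abort handlers, indexed by the 5-bit fault status from fsr_fs().
 * Each entry gives the handler to run plus the signal and si_code to
 * deliver if the handler cannot resolve the fault.
 */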
static struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
} fsr_info[] = {
	/*
	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
	 * defines these to be "precise" aborts.
	 */
	{ do_bad,		SIGSEGV, 0,		"vector exception"		   },
	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
	{ do_bad,		SIGKILL, 0,		"terminal exception"		   },
	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	   },
	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault"		   },
	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
	/*
	 * The following are "imprecise" aborts, which are signalled by bit
	 * 10 of the FSR, and may not be recoverable.  These are only
	 * supported if the CPU abort handler supports bit 10.
	 */
	{ do_bad,		SIGBUS,	 0,		"unknown 16"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 17"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 18"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 19"			   },
	{ do_bad,		SIGBUS,	 0,		"lock abort"			   }, /* xscale */
	{ do_bad,		SIGBUS,	 0,		"unknown 21"			   },
	{ do_bad,		SIGBUS,	 BUS_OBJERR,	"imprecise external abort"	   }, /* xscale */
	{ do_bad,		SIGBUS,	 0,		"unknown 23"			   },
	{ do_bad,		SIGBUS,	 0,		"dcache parity error"		   }, /* xscale */
	{ do_bad,		SIGBUS,	 0,		"unknown 25"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 26"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 27"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 28"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 29"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 30"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 31"			   }
};
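
/*
 * Allow a handler in fsr_info[] to be replaced at boot time, e.g. from
 * exceptions_init() below.
 */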
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}
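
/*
 * Instruction fault status (IFSR) handlers for prefetch aborts, indexed
 * with fsr_fs() just like fsr_info[] above.
 */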
static struct fsr_info ifsr_info[] = {
	{ do_bad,		SIGBUS,	 0,		"unknown 0"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 1"			   },
	{ do_bad,		SIGBUS,	 0,		"debug event"			   },
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section access flag fault"	   },
	{ do_bad,		SIGBUS,	 0,		"unknown 4"			   },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page access flag fault"	   },
	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	   },
	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault"		   },
	{ do_bad,		SIGBUS,	 0,		"unknown 10"			   },
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
	{ do_bad,		SIGBUS,	 0,		"unknown 16"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 17"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 18"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 19"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 20"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 21"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 22"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 23"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 24"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 25"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 26"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 27"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 28"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 29"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 30"			   },
	{ do_bad,		SIGBUS,	 0,		"unknown 31"			   },
};

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}
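
/*
 * Dispatch a prefetch abort to the relevant handler.  The status is passed
 * on with FSR_LNX_PF set so that access_error() treats the access as an
 * instruction fetch (VM_EXEC).
 */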
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}

static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);