// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
/*
* Decode of hardware exception sends us to one of several
* entry points . At each , we generate canonical arguments
* for handling by the abstract memory management code .
*/
# define FLT_IFETCH -1
# define FLT_LOAD 0
# define FLT_STORE 1
/*
* Canonical page fault handler
*/
void do_page_fault ( unsigned long address , long cause , struct pt_regs * regs )
{
struct vm_area_struct * vma ;
struct mm_struct * mm = current - > mm ;
2018-04-16 19:26:58 +03:00
int si_signo ;
2011-11-01 03:54:08 +04:00
int si_code = SEGV_MAPERR ;
2018-08-18 01:44:47 +03:00
vm_fault_t fault ;
2011-11-01 03:54:08 +04:00
const struct exception_table_entry * fixup ;
2020-04-02 07:08:37 +03:00
unsigned int flags = FAULT_FLAG_DEFAULT ;
2011-11-01 03:54:08 +04:00
/*
* If we ' re in an interrupt or have no user context ,
* then must not take the fault .
*/
if ( unlikely ( in_interrupt ( ) | | ! mm ) )
goto no_context ;
local_irq_enable ( ) ;
2013-09-13 02:13:39 +04:00
if ( user_mode ( regs ) )
flags | = FAULT_FLAG_USER ;
2012-03-20 17:23:33 +04:00
retry :
2011-11-01 03:54:08 +04:00
down_read ( & mm - > mmap_sem ) ;
vma = find_vma ( mm , address ) ;
if ( ! vma )
goto bad_area ;
if ( vma - > vm_start < = address )
goto good_area ;
if ( ! ( vma - > vm_flags & VM_GROWSDOWN ) )
goto bad_area ;
if ( expand_stack ( vma , address ) )
goto bad_area ;
good_area :
/* Address space is OK. Now check access rights. */
si_code = SEGV_ACCERR ;
switch ( cause ) {
case FLT_IFETCH :
if ( ! ( vma - > vm_flags & VM_EXEC ) )
goto bad_area ;
break ;
case FLT_LOAD :
if ( ! ( vma - > vm_flags & VM_READ ) )
goto bad_area ;
break ;
case FLT_STORE :
if ( ! ( vma - > vm_flags & VM_WRITE ) )
goto bad_area ;
2013-09-13 02:13:39 +04:00
flags | = FAULT_FLAG_WRITE ;
2011-11-01 03:54:08 +04:00
break ;
}
2016-07-27 01:25:18 +03:00
fault = handle_mm_fault ( vma , address , flags ) ;
2012-03-20 17:23:33 +04:00
2020-04-02 07:08:06 +03:00
if ( fault_signal_pending ( fault , regs ) )
2012-03-20 17:23:33 +04:00
return ;
2011-11-01 03:54:08 +04:00
/* The most common case -- we are done. */
if ( likely ( ! ( fault & VM_FAULT_ERROR ) ) ) {
2012-03-20 17:23:33 +04:00
if ( flags & FAULT_FLAG_ALLOW_RETRY ) {
if ( fault & VM_FAULT_MAJOR )
current - > maj_flt + + ;
else
current - > min_flt + + ;
if ( fault & VM_FAULT_RETRY ) {
2012-10-09 03:32:19 +04:00
flags | = FAULT_FLAG_TRIED ;
2012-03-20 17:23:33 +04:00
goto retry ;
}
}
2011-11-01 03:54:08 +04:00
up_read ( & mm - > mmap_sem ) ;
return ;
}
up_read ( & mm - > mmap_sem ) ;
/* Handle copyin/out exception cases */
if ( ! user_mode ( regs ) )
goto no_context ;
if ( fault & VM_FAULT_OOM ) {
pagefault_out_of_memory ( ) ;
return ;
}
/* User-mode address is in the memory map, but we are
* unable to fix up the page fault .
*/
if ( fault & VM_FAULT_SIGBUS ) {
2018-04-16 19:26:58 +03:00
si_signo = SIGBUS ;
si_code = BUS_ADRERR ;
2011-11-01 03:54:08 +04:00
}
/* Address is not in the memory map */
else {
2018-04-16 19:26:58 +03:00
si_signo = SIGSEGV ;
si_code = SEGV_ACCERR ;
2011-11-01 03:54:08 +04:00
}
2019-05-23 19:04:24 +03:00
force_sig_fault ( si_signo , si_code , ( void __user * ) address ) ;
2011-11-01 03:54:08 +04:00
return ;
bad_area :
up_read ( & mm - > mmap_sem ) ;
if ( user_mode ( regs ) ) {
2019-05-23 19:04:24 +03:00
force_sig_fault ( SIGSEGV , si_code , ( void __user * ) address ) ;
2011-11-01 03:54:08 +04:00
return ;
}
/* Kernel-mode fault falls through */
no_context :
fixup = search_exception_tables ( pt_elr ( regs ) ) ;
if ( fixup ) {
pt_set_elr ( regs , fixup - > fixup ) ;
return ;
}
/* Things are looking very, very bad now */
bust_spinlocks ( 1 ) ;
printk ( KERN_EMERG " Unable to handle kernel paging request at "
" virtual address 0x%08lx, regs %p \n " , address , regs ) ;
die ( " Bad Kernel VA " , regs , SIGKILL ) ;
}
void read_protection_fault ( struct pt_regs * regs )
{
unsigned long badvadr = pt_badva ( regs ) ;
do_page_fault ( badvadr , FLT_LOAD , regs ) ;
}
void write_protection_fault ( struct pt_regs * regs )
{
unsigned long badvadr = pt_badva ( regs ) ;
do_page_fault ( badvadr , FLT_STORE , regs ) ;
}
void execute_protection_fault ( struct pt_regs * regs )
{
unsigned long badvadr = pt_badva ( regs ) ;
do_page_fault ( badvadr , FLT_IFETCH , regs ) ;
}