// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>
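
/*
 * Fix up a kernel-mode fault: if the faulting instruction has an entry in
 * the exception table (e.g. a user-space access in get_user()/put_user()),
 * continue execution at the recorded fixup address instead of oopsing.
 */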
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;
		return 1;
	}

	return 0;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
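/*
 * regs:    register state saved at exception entry
 * write:   non-zero if the faulting access was a write
 * mmu_meh: value of the MMU's MEH register as passed in by the low-level
 *          entry code; its page-aligned bits give the faulting address
 */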
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long mmu_meh)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code;
	int fault;
	unsigned long address = mmu_meh & PAGE_MASK;

	si_code = SEGV_MAPERR;
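
	/*
	 * Without CONFIG_CPU_HAS_TLBI, faults in the vmalloc range are fixed
	 * up by hand below, by copying the missing entries from init_mm's
	 * page table into the one currently in use.
	 */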
#ifndef CONFIG_CPU_HAS_TLBI
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START) &&
	    unlikely(address <= VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		unsigned long pgd_base;

		pgd_base = (unsigned long)__va(get_pgd());
		pgd = (pgd_t *)pgd_base + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = (pud_t *)pgd;
		pud_k = (pud_t *)pgd_k;
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
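
	/*
	 * Look up the VMA covering the faulting address.  An address just
	 * below a VM_GROWSDOWN mapping is treated as a stack access, and
	 * the stack is expanded to cover it.
	 */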
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
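
	/*
	 * A mapping exists; now check that the access type (write versus
	 * read/execute) is permitted by the VMA's protection flags.
	 */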
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		BUG();
	}
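
	/*
	 * Account the fault as major (I/O was required to satisfy it) or
	 * minor, and report it to the perf subsystem.
	 */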
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
			      address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
			      address);
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
	die_if_kernel("Oops", regs, write);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}