// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"
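
/*
 * Handle a fault taken in kernel mode (or with no usable user context):
 * branch to an exception fixup if one exists for the faulting
 * instruction, otherwise oops and kill the current task.
 */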
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
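
/*
 * Handle the VM_FAULT_ERROR results from handle_mm_fault(): OOM faults
 * are passed to the OOM killer (or to no_context() in kernel mode), bus
 * errors raise SIGBUS, and anything else is a bug.
 */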
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}
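
/*
 * The faulting address is not covered by a valid mapping: release the
 * mmap lock, then deliver SIGSEGV for user-mode accesses or fall back
 * to no_context() for kernel-mode ones.
 */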
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}
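
/*
 * Faults in the vmalloc/ioremap region are resolved by copying the
 * missing entries from the reference page table (init_mm.pgd) into the
 * page table that the current SATP value points at.
 */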
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
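	/*
	 * Take the mmap read lock and look up the VMA covering the
	 * faulting address, expanding the stack if the access falls just
	 * below a VM_GROWSDOWN mapping.
	 */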
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;
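
	/*
	 * Check that the access implied by the trap cause (execute, read
	 * or write) is permitted by the VMA's protection flags.
	 */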
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC)) {
			bad_area(regs, mm, code, addr);
			return;
		}
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ)) {
			bad_area(regs, mm, code, addr);
			return;
		}
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE)) {
			bad_area(regs, mm, code, addr);
			return;
		}
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}