/*
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>

/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
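
/*
 * For example, a user-mode write to a not-present page is reported with
 * error_code == (PF_USER | PF_WRITE): PF_PROT is clear because no page
 * was found, and the handlers below test individual bits with
 * expressions such as (error_code & PF_USER).
 */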

static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}

/*
 * X86_32
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

#ifdef CONFIG_X86_32
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & PF_INSTR))
			return 0;
	} else {
		return 0;
	}
#else
	/* If it was an exec fault, ignore it. */
	if (error_code & PF_INSTR)
		return 0;
#endif

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
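	/* An x86 instruction can be at most 15 bytes long; bound the scan. */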
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26, 0x2E, 0x36, 0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;

			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

static void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

void dump_pagetable(unsigned long address)
{
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							 & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							 & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
}

void do_invalid_op(struct pt_regs *, unsigned long);

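/*
 * Look up the kernel pmd that maps @address in the reference page table
 * (init_mm.pgd) and, if the corresponding entry in @pgd is empty, copy it
 * over.  Returns the kernel pmd, or NULL if the reference table has no
 * mapping for @address either.
 */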
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64 bit RIP register on C stepping K8.
   A lot of BIOS that didn't get tested properly miss this.
   The OS sees this as a page fault with the upper 32 bits of RIP cleared.
   Try to work around it here.
   Note we only handle faults in kernel here.
   Does nothing for X86_32
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int warned;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Copy kernel mappings over when needed. This can also
	   happen within a race in page table update. In the latter
	   case just flush. */
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/* Below here mismatches are bugs because these lower tables
	   are shared */
	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);
	/* Don't use pte_page here, because the mappings can point
	   outside mem_map, and the NUMA hash lookup cannot handle
	   that. */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	return 0;
#endif
}

int show_unhandled_signals = 1;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	if (notify_page_fault(regs))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work.  ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:		/* read, present */
		goto bad_area;
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#ifdef CONFIG_X86_32
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (v8086_mode(regs)) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
#endif
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
#ifdef CONFIG_X86_32
			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address, regs->ip,
			regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

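		/*
		 * With the F00F workaround in place the IDT is mapped
		 * read-only, so the erratum surfaces as a page fault on
		 * the IDT instead of locking up the CPU.  A fault within
		 * descriptor 6 (the invalid-opcode vector) means the
		 * original exception was #UD, so dispatch do_invalid_op().
		 */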
		nr = (address - idt_descr.address) >> 3;
		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (oops_may_print()) {
#ifdef CONFIG_X86_PAE
		if (error_code & PF_INSTR) {
			int level;
			pte_t *pte = lookup_address(address, &level);

			if (pte && pte_present(*pte) && !pte_exec(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "printing ip: %08lx ", regs->ip);

		dump_pagetable(address);
	}

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}

	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
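			/*
			 * pgd_list links the pgd pages of all processes,
			 * threaded through the otherwise unused page->index
			 * field, so each process pagetable gets the update.
			 */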
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
								address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}