/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains the exception handler for address error exceptions with
 * the special capability to execute faulting instructions in software.  The
 * handler does not try to handle the case when the program counter points
 * to an address not aligned to a word boundary.
 *
 * Putting data at unaligned addresses is bad practice even on Intel where
 * only the performance is affected.  Much worse is that such code is
 * non-portable.  Because several programs die on MIPS due to alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this at some point in the future when the
 * alignment problems with user programs have been fixed.  For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I discourage the use of the software
 * emulation strongly - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation, non-zero to enable it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = { 0, 1, 2, 3, 4, 5, 6, 7 };
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08lx\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#define STR(x)	__STR(x)
#define __STR(x)	#x
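
/*
 * STR(PTR) stringifies the PTR assembler macro from <asm/asm.h> so it can be
 * pasted into the inline asm strings below; this way the __ex_table entries
 * are pointer-sized on both 32-bit and 64-bit kernels.
 */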
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
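
/*
 * How do_ade() below reacts to an unaligned user access: QUIET silently
 * fixes the access up (the default), SIGNAL forwards a SIGBUS instead of
 * emulating, and SHOW dumps the registers before the access is emulated.
 * With CONFIG_DEBUG_FS the action and a counter of emulated instructions
 * are exposed through debugfs (see the bottom of this file).
 */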
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);

static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;

	regs->regs[0] = 0;

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);
	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of interest.
	 */
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;
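
		/*
		 * Assemble the halfword out of two byte loads: %0 gets the
		 * sign-carrying byte via lb, $1 (at) gets the other byte via
		 * lbu, then sll/or merge them.  Labels 1:/2: are listed in
		 * __ex_table, so a fault in either load branches to the
		 * .fixup stub, which sets res to -EFAULT.
		 */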
__asm__ __volatile__ ( " .set \t noat \n "
# ifdef __BIG_ENDIAN
" 1: \t lb \t %0, 0(%2) \n "
" 2: \t lbu \t $1, 1(%2) \n \t "
# endif
# ifdef __LITTLE_ENDIAN
" 1: \t lb \t %0, 1(%2) \n "
" 2: \t lbu \t $1, 0(%2) \n \t "
# endif
" sll \t %0, 0x8 \n \t "
" or \t %0, $1 \n \t "
" li \t %1, 0 \n "
" 3: \t .set \t at \n \t "
" .section \t .fixup, \" ax \" \n \t "
" 4: \t li \t %1, %3 \n \t "
" j \t 3b \n \t "
" .previous \n \t "
" .section \t __ex_table, \" a \" \n \t "
STR ( PTR ) " \t 1b, 4b \n \t "
STR ( PTR ) " \t 2b, 4b \n \t "
" .previous "
: " =&r " ( value ) , " =r " ( res )
: " r " ( addr ) , " i " ( - EFAULT ) ) ;
if ( res )
goto fault ;
2007-07-29 12:16:19 +04:00
compute_return_epc ( regs ) ;
regs - > regs [ insn . i_format . rt ] = value ;
2005-04-17 02:20:36 +04:00
break ;
	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;
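
		/*
		 * lwl/lwr fetch the unaligned word in two accesses: on
		 * big-endian, lwl reads the most significant bytes starting
		 * at the lower address and lwr fills in the rest ending at
		 * addr + 3; the offsets are mirrored for little-endian.
		 */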
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;
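
		/*
		 * Same byte-assembly trick as lh_op above, but both byte
		 * loads use lbu so the result is zero-extended.
		 */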
		__asm__ __volatile__ (
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlbu\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlbu\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;
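
		/*
		 * Load the word with lwl/lwr as above; the dsll/dsrl pair by
		 * 32 then clears the upper half of the 64-bit register so
		 * the result is zero-extended, as lwu requires.
		 */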
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"dsll\t%0, %0, 32\n\t"
			"dsrl\t%0, %0, 32\n\t"
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;
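
		/*
		 * ldl/ldr are the doubleword counterparts of lwl/lwr and
		 * assemble the full 64-bit value in two accesses.
		 */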
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tldl\t%0, (%2)\n"
			"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tldl\t%0, 7(%2)\n"
			"2:\tldr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
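
		/*
		 * Store the halfword as two byte stores: sb writes the low
		 * byte, then the value is shifted right by 8 and the high
		 * byte is written with a second sb; only the offsets differ
		 * between big- and little-endian.
		 */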
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 1(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 0(%2)\n\t"
			".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 0(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 1(%2)\n\t"
			".set\tat\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;
	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
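
		/*
		 * swl/swr mirror the lwl/lwr pair above and store the
		 * unaligned word with two partial-word accesses.
		 */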
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tswl\t%1, (%2)\n"
			"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tswl\t%1, 3(%2)\n"
			"2:\tswr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;
	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
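
		/*
		 * sdl/sdr store the unaligned doubleword, analogous to the
		 * swl/swr pair for words.
		 */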
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tsdl\t%1, (%2)\n"
			"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tsdl\t%1, 7(%2)\n"
			"2:\tsdr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/*
		 * I herewith declare: this does not happen.  So send SIGBUS.
		 */
		goto sigbus;

	case lwc2_op:
	case ldc2_op:
	case swc2_op:
	case sdc2_op:
		/*
		 * These are the coprocessor 2 load/stores.  The current
		 * implementations don't use cp2 and cp2 should always be
		 * disabled in c0_status.  So send SIGILL.
		 * (No longer true: The Sony Praystation uses cp2 for
		 * 3D matrix operations.  Dunno if that thingy has an MMU ...)
		 */
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGSEGV, current, 1);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGBUS, current, 1);

	return;

sigill:
	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
	send_sig(SIGILL, current, 1);
}
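
/*
 * Top-level address error exception handler: weed out the cases that are
 * not unaligned data accesses (FPU emulator delay-slot returns, instruction
 * fetch faults, MIPS16 mode), honour the per-thread TIF_FIXADE option and
 * the debugfs unaligned_action, then emulate the faulting load/store above.
 */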
asmlinkage void do_ade(struct pt_regs *regs)
{
	extern int do_dsemulret(struct pt_regs *);
	unsigned int __user *pc;
	mm_segment_t seg;

	/*
	 * Address errors may be deliberately induced by the FPU emulator to
	 * retake control of the CPU after executing the instruction in the
	 * delay slot of an emulated branch.
	 */
	/* Terminate if exception was recognized as a delay slot return */
	if (do_dsemulret(regs))
		return;

	/* Otherwise handle as normal */

	/*
	 * Did we catch a fault trying to load an instruction?
	 * Or are we running in MIPS16 mode?
	 */
	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
		goto sigbus;

	pc = (unsigned int __user *) exception_epc(regs);
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;

	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;
	else if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so ugly ...
	 */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
}

#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (IS_ERR(d))
		return PTR_ERR(d);
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (IS_ERR(d))
		return PTR_ERR(d);
	return 0;
}
__initcall(debugfs_unaligned);
#endif
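
/*
 * Runtime usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * mips_debugfs_dir corresponds to a "mips" directory there):
 *
 *   cat /sys/kernel/debug/mips/unaligned_instructions
 *   echo 1 > /sys/kernel/debug/mips/unaligned_action   # UNALIGNED_ACTION_SIGNAL
 */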