/* align.c - handle alignment exceptions for the Power PC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *		PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *		PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *		64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *		<benh@kernel.crashing.org>
 *		Merge ppc32 and ppc64 implementations
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
struct aligninfo {
	unsigned char len;
	unsigned char flags;
};

#define INVALID	{ 0, 0 }
/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define F	4	/* to/from fp regs */
#define U	8	/* update index register */
#define M	0x10	/* multiple load/store */
#define SW	0x20	/* byte swap */
#define S	0x40	/* single-precision fp or... */
#define SX	0x40	/* ... byte count in XER */
#define HARD	0x80	/* string, stwcx. */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */
#define SPLT	0x80	/* VSX SPLAT load */
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */

/*
 * The PowerPC stores certain bits of the instruction that caused the
 * alignment exception in the DSISR register.  This array maps those
 * bits to information about the operand length and what the
 * instruction would do.
 */
static struct aligninfo aligninfo[128] = {
	{ 4, LD },		/* 00 0 0000: lwz / lwarx */
	INVALID,		/* 00 0 0001 */
	{ 4, ST },		/* 00 0 0010: stw */
	INVALID,		/* 00 0 0011 */
	{ 2, LD },		/* 00 0 0100: lhz */
	{ 2, LD+SE },		/* 00 0 0101: lha */
	{ 2, ST },		/* 00 0 0110: sth */
	{ 4, LD+M },		/* 00 0 0111: lmw */
	{ 4, LD+F+S },		/* 00 0 1000: lfs */
	{ 8, LD+F },		/* 00 0 1001: lfd */
	{ 4, ST+F+S },		/* 00 0 1010: stfs */
	{ 8, ST+F },		/* 00 0 1011: stfd */
	{ 16, LD },		/* 00 0 1100: lq */
	{ 8, LD },		/* 00 0 1101: ld/ldu/lwa */
	INVALID,		/* 00 0 1110 */
	{ 8, ST },		/* 00 0 1111: std/stdu */
	{ 4, LD+U },		/* 00 1 0000: lwzu */
	INVALID,		/* 00 1 0001 */
	{ 4, ST+U },		/* 00 1 0010: stwu */
	INVALID,		/* 00 1 0011 */
	{ 2, LD+U },		/* 00 1 0100: lhzu */
	{ 2, LD+SE+U },		/* 00 1 0101: lhau */
	{ 2, ST+U },		/* 00 1 0110: sthu */
	{ 4, ST+M },		/* 00 1 0111: stmw */
	{ 4, LD+F+S+U },	/* 00 1 1000: lfsu */
	{ 8, LD+F+U },		/* 00 1 1001: lfdu */
	{ 4, ST+F+S+U },	/* 00 1 1010: stfsu */
	{ 8, ST+F+U },		/* 00 1 1011: stfdu */
	{ 16, LD+F },		/* 00 1 1100: lfdp */
	INVALID,		/* 00 1 1101 */
	{ 16, ST+F },		/* 00 1 1110: stfdp */
	INVALID,		/* 00 1 1111 */
	{ 8, LD },		/* 01 0 0000: ldx */
	INVALID,		/* 01 0 0001 */
	{ 8, ST },		/* 01 0 0010: stdx */
	INVALID,		/* 01 0 0011 */
	INVALID,		/* 01 0 0100 */
	{ 4, LD+SE },		/* 01 0 0101: lwax */
	INVALID,		/* 01 0 0110 */
	INVALID,		/* 01 0 0111 */
	{ 4, LD+M+HARD+SX },	/* 01 0 1000: lswx */
	{ 4, LD+M+HARD },	/* 01 0 1001: lswi */
	{ 4, ST+M+HARD+SX },	/* 01 0 1010: stswx */
	{ 4, ST+M+HARD },	/* 01 0 1011: stswi */
	INVALID,		/* 01 0 1100 */
	{ 8, LD+U },		/* 01 0 1101: ldu */
	INVALID,		/* 01 0 1110 */
	{ 8, ST+U },		/* 01 0 1111: stdu */
	{ 8, LD+U },		/* 01 1 0000: ldux */
	INVALID,		/* 01 1 0001 */
	{ 8, ST+U },		/* 01 1 0010: stdux */
	INVALID,		/* 01 1 0011 */
	INVALID,		/* 01 1 0100 */
	{ 4, LD+SE+U },		/* 01 1 0101: lwaux */
	INVALID,		/* 01 1 0110 */
	INVALID,		/* 01 1 0111 */
	INVALID,		/* 01 1 1000 */
	INVALID,		/* 01 1 1001 */
	INVALID,		/* 01 1 1010 */
	INVALID,		/* 01 1 1011 */
	INVALID,		/* 01 1 1100 */
	INVALID,		/* 01 1 1101 */
	INVALID,		/* 01 1 1110 */
	INVALID,		/* 01 1 1111 */
	INVALID,		/* 10 0 0000 */
	INVALID,		/* 10 0 0001 */
	INVALID,		/* 10 0 0010: stwcx. */
	INVALID,		/* 10 0 0011 */
	INVALID,		/* 10 0 0100 */
	INVALID,		/* 10 0 0101 */
	INVALID,		/* 10 0 0110 */
	INVALID,		/* 10 0 0111 */
	{ 4, LD+SW },		/* 10 0 1000: lwbrx */
	INVALID,		/* 10 0 1001 */
	{ 4, ST+SW },		/* 10 0 1010: stwbrx */
	INVALID,		/* 10 0 1011 */
	{ 2, LD+SW },		/* 10 0 1100: lhbrx */
	{ 4, LD+SE },		/* 10 0 1101: lwa */
	{ 2, ST+SW },		/* 10 0 1110: sthbrx */
	{ 16, ST },		/* 10 0 1111: stq */
	INVALID,		/* 10 1 0000 */
	INVALID,		/* 10 1 0001 */
	INVALID,		/* 10 1 0010 */
	INVALID,		/* 10 1 0011 */
	INVALID,		/* 10 1 0100 */
	INVALID,		/* 10 1 0101 */
	INVALID,		/* 10 1 0110 */
	INVALID,		/* 10 1 0111 */
	INVALID,		/* 10 1 1000 */
	INVALID,		/* 10 1 1001 */
	INVALID,		/* 10 1 1010 */
	INVALID,		/* 10 1 1011 */
	INVALID,		/* 10 1 1100 */
	INVALID,		/* 10 1 1101 */
	INVALID,		/* 10 1 1110 */
	{ 0, ST+HARD },		/* 10 1 1111: dcbz */
	{ 4, LD },		/* 11 0 0000: lwzx */
	INVALID,		/* 11 0 0001 */
	{ 4, ST },		/* 11 0 0010: stwx */
	INVALID,		/* 11 0 0011 */
	{ 2, LD },		/* 11 0 0100: lhzx */
	{ 2, LD+SE },		/* 11 0 0101: lhax */
	{ 2, ST },		/* 11 0 0110: sthx */
	INVALID,		/* 11 0 0111 */
	{ 4, LD+F+S },		/* 11 0 1000: lfsx */
	{ 8, LD+F },		/* 11 0 1001: lfdx */
	{ 4, ST+F+S },		/* 11 0 1010: stfsx */
	{ 8, ST+F },		/* 11 0 1011: stfdx */
	{ 16, LD+F },		/* 11 0 1100: lfdpx */
	{ 4, LD+F+SE },		/* 11 0 1101: lfiwax */
	{ 16, ST+F },		/* 11 0 1110: stfdpx */
	{ 4, ST+F },		/* 11 0 1111: stfiwx */
	{ 4, LD+U },		/* 11 1 0000: lwzux */
	INVALID,		/* 11 1 0001 */
	{ 4, ST+U },		/* 11 1 0010: stwux */
	INVALID,		/* 11 1 0011 */
	{ 2, LD+U },		/* 11 1 0100: lhzux */
	{ 2, LD+SE+U },		/* 11 1 0101: lhaux */
	{ 2, ST+U },		/* 11 1 0110: sthux */
	INVALID,		/* 11 1 0111 */
	{ 4, LD+F+S+U },	/* 11 1 1000: lfsux */
	{ 8, LD+F+U },		/* 11 1 1001: lfdux */
	{ 4, ST+F+S+U },	/* 11 1 1010: stfsux */
	{ 8, ST+F+U },		/* 11 1 1011: stfdux */
	INVALID,		/* 11 1 1100 */
	{ 4, LD+F },		/* 11 1 1101: lfiwzx */
	INVALID,		/* 11 1 1110 */
	INVALID,		/* 11 1 1111 */
};
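
/*
 * Example: an unaligned lwz reports DSISR bits that select entry 0
 * ("00 0 0000") above, i.e. { 4, LD }: a 4-byte load with no special
 * handling.  See fix_alignment() below for how the index is rebuilt.
 */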

/*
 * The dcbz (data cache block zero) instruction
 * gives an alignment fault if used on non-cacheable
 * memory.  We handle the fault mainly for the
 * case when we are running with the cache disabled
 * for debugging.
 */
static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
{
	long __user *p;
	int i, size;

#ifdef __powerpc64__
	size = ppc64_caches.dline_size;
#else
	size = L1_CACHE_BYTES;
#endif
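	/* regs->dar holds the faulting address; size is a power of two,
	   so "& -size" rounds down to the start of the cache block. */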
	p = (long __user *)(regs->dar & -size);
	if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
		return -EFAULT;
	for (i = 0; i < size / sizeof(long); ++i)
		if (__put_user_inatomic(0, p+i))
			return -EFAULT;
	return 1;
}

/*
 * Emulate load & store multiple instructions
 * On 64-bit machines, these instructions only affect/use the
 * bottom 4 bytes of each register, and the loads clear the
 * top 4 bytes of the affected register.
 */
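
/*
 * REG_BYTE(rp, i) addresses byte (i & 3) of the low 32 bits of the
 * register word rp[i >> 2]; on 64-bit big-endian the "+ 4" skips the
 * unused high half of each 8-byte register, matching the semantics
 * described above.
 */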
#ifdef __BIG_ENDIAN__
#ifdef CONFIG_PPC64
#define REG_BYTE(rp, i)		*((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i)		*((u8 *)(rp) + (i))
#endif
#else
#define REG_BYTE(rp, i)		(*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
#endif
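
/*
 * SWIZ_PTR() applies the "PowerPC little endian" address swizzle: with
 * swiz == 7 it XORs each byte address with 7 (see the comment in
 * fix_alignment() below), and with swiz == 0 it is the identity.
 */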
#define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))

static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
			    unsigned int reg, unsigned int nb,
			    unsigned int flags, unsigned int instr,
			    unsigned long swiz)
{
	unsigned long *rptr;
	unsigned int nb0, i, bswiz;
	unsigned long p;

	/*
	 * We do not try to emulate 8-byte multiples as they aren't really
	 * available in our operating environments and we don't try to
	 * emulate multiple operations in kernel land as they should never
	 * be used/generated there, at least not on unaligned boundaries.
	 */
	if (unlikely((nb > 4) || !user_mode(regs)))
		return 0;

	/* lmw, stmw, lswi/x, stswi/x */
	nb0 = 0;
	if (flags & HARD) {
		if (flags & SX) {
			nb = regs->xer & 127;
			if (nb == 0)
				return 1;
		} else {
			unsigned long pc = regs->nip ^ (swiz & 4);

			if (__get_user_inatomic(instr,
						(unsigned int __user *)pc))
				return -EFAULT;
			if (swiz == 0 && (flags & SW))
				instr = cpu_to_le32(instr);
			nb = (instr >> 11) & 0x1f;
			if (nb == 0)
				nb = 32;
		}
		if (nb + reg * 4 > 128) {
			nb0 = nb + reg * 4 - 128;
			nb = 128 - reg * 4;
		}
#ifdef __LITTLE_ENDIAN__
		/*
		 *  String instructions are endian neutral but the code
		 *  below is not.  Force byte swapping on so that the
		 *  effects of swizzling are undone in the load/store
		 *  loops below.
		 */
		flags ^= SW;
#endif
	} else {
		/* lmw, stmw */
		nb = (32 - reg) * 4;
	}

	if (!access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), addr, nb+nb0))
		return -EFAULT;	/* bad address */

	rptr = &regs->gpr[reg];
	p = (unsigned long)addr;
	bswiz = (flags & SW) ? 3 : 0;

	if (!(flags & ST)) {
		/*
		 * This zeroes the top 4 bytes of the affected registers
		 * in 64-bit mode, and also zeroes out any remaining
		 * bytes of the last register for lsw*.
		 */
		memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
		if (nb0 > 0)
			memset(&regs->gpr[0], 0,
			       ((nb0 + 3) / 4) * sizeof(unsigned long));

		for (i = 0; i < nb; ++i, ++p)
			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__get_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}

	} else {
		for (i = 0; i < nb; ++i, ++p)
			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__put_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	}
	return 1;
}

/*
 * Emulate floating-point pair loads and stores.
 * Only POWER6 has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
			   unsigned int flags)
{
	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
	int i, ret, sw = 0;

	if (reg & 1)
		return 0;	/* invalid form: FRS/FRT must be even */
	if (flags & SW)
		sw = 7;
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
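
/*
 * Emulate quadword (lq/stq) loads and stores to an even/odd GPR pair,
 * in the same way emulate_fp_pair() above handles an FP register pair.
 */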
#ifdef CONFIG_PPC64
static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
			  unsigned int reg, unsigned int flags)
{
	char *ptr0 = (char *)&regs->gpr[reg];
	char *ptr1 = (char *)&regs->gpr[reg+1];
	int i, ret, sw = 0;

	if (reg & 1)
		return 0;	/* invalid form: GPR must be even */
	if (flags & SW)
		sw = 7;
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_SPE

static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */
	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};

#define	EVLDD		0x00
#define	EVLDW		0x01
#define	EVLDH		0x02
#define	EVLHHESPLAT	0x04
#define	EVLHHOUSPLAT	0x06
#define	EVLHHOSSPLAT	0x07
#define	EVLWHE		0x08
#define	EVLWHOU		0x0A
#define	EVLWHOS		0x0B
#define	EVLWWSPLAT	0x0C
#define	EVLWHSPLAT	0x0E
#define	EVSTDD		0x10
#define	EVSTDW		0x11
#define	EVSTDH		0x12
#define	EVSTWHE		0x18
#define	EVSTWHO		0x1A
#define	EVSTWWE		0x1C
#define	EVSTWWO		0x1E

/*
 * Emulate SPE loads and stores.
 * Only Book-E has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       unsigned int instr)
{
	int ret;
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags;

	instr = (instr >> 1) & 0x1f;

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (flags & ST) {
		data.ll = 0;
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		default:
			return -EINVAL;
		}
	} else {
		temp.ll = data.ll = 0;
		ret = 0;
		p = addr;

		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(temp.v[0], p++);
			ret |= __get_user_inatomic(temp.v[1], p++);
			ret |= __get_user_inatomic(temp.v[2], p++);
			ret |= __get_user_inatomic(temp.v[3], p++);
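			/* fall through: read the remaining bytes */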
		case 4:
			ret |= __get_user_inatomic(temp.v[4], p++);
			ret |= __get_user_inatomic(temp.v[5], p++);
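			/* fall through */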
		case 2:
			ret |= __get_user_inatomic(temp.v[6], p++);
			ret |= __get_user_inatomic(temp.v[7], p++);
			if (unlikely(ret))
				return -EFAULT;
		}

		switch (instr) {
		case EVLDD:
		case EVLDW:
		case EVLDH:
			data.ll = temp.ll;
			break;
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			return -EINVAL;
		}
	}

	if (flags & SW) {
		switch (flags & 0xf0) {
		case E8:
			data.ll = swab64(data.ll);
			break;
		case E4:
			data.w[0] = swab32(data.w[0]);
			data.w[1] = swab32(data.w[1]);
			break;
		/* It's half-word endian */
		default:
			data.h[0] = swab16(data.h[0]);
			data.h[1] = swab16(data.h[1]);
			data.h[2] = swab16(data.h[2]);
			data.h[3] = swab16(data.h[3]);
			break;
		}
	}

	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = addr;
		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], p++);
			ret |= __put_user_inatomic(data.v[1], p++);
			ret |= __put_user_inatomic(data.v[2], p++);
			ret |= __put_user_inatomic(data.v[3], p++);
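			/* fall through: write the remaining bytes */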
		case 4:
			ret |= __put_user_inatomic(data.v[4], p++);
			ret |= __put_user_inatomic(data.v[5], p++);
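			/* fall through */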
		case 2:
			ret |= __put_user_inatomic(data.v[6], p++);
			ret |= __put_user_inatomic(data.v[7], p++);
		}
		if (unlikely(ret))
			return -EFAULT;
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}
	return 1;
}
#endif /* CONFIG_SPE */

#ifdef CONFIG_VSX
/*
 * Emulate VSX instructions...
 */
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
		       unsigned int areg, struct pt_regs *regs,
		       unsigned int flags, unsigned int length,
		       unsigned int elsize)
{
	char *ptr;
	unsigned long *lptr;
	int ret = 0;
	int sw = 0;
	int i, j;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_vsx_to_thread(current);

	if (reg < 32)
		ptr = (char *) &current->thread.fp_state.fpr[reg][0];
	else
		ptr = (char *) &current->thread.vr_state.vr[reg - 32];

	lptr = (unsigned long *) ptr;

#ifdef __LITTLE_ENDIAN__
	if (flags & SW) {
		elsize = length;
		sw = length-1;
	} else {
		/*
		 * The elements are BE ordered, even in LE mode, so process
		 * them in reverse order.
		 */
		addr += length - elsize;

		/* 8 byte memory accesses go in the top 8 bytes of the VR */
		if (length == 8)
			ptr += 8;
	}
#else
	if (flags & SW)
		sw = elsize-1;
#endif

	for (j = 0; j < length; j += elsize) {
		for (i = 0; i < elsize; ++i) {
			if (flags & ST)
				ret |= __put_user(ptr[i^sw], addr + i);
			else
				ret |= __get_user(ptr[i^sw], addr + i);
		}
		ptr  += elsize;
#ifdef __LITTLE_ENDIAN__
		addr -= elsize;
#else
		addr += elsize;
#endif
	}

#ifdef __BIG_ENDIAN__
#define VSX_HI 0
#define VSX_LO 1
#else
#define VSX_HI 1
#define VSX_LO 0
#endif

	if (!ret) {
		if (flags & U)
			regs->gpr[areg] = regs->dar;

		/* Splat load copies the same data to top and bottom 8 bytes */
		if (flags & SPLT)
			lptr[VSX_LO] = lptr[VSX_HI];
		/* For 8 byte loads, zero the low 8 bytes */
		else if (!(flags & ST) && (8 == length))
			lptr[VSX_LO] = 0;
	} else
		return -EFAULT;

	return 1;
}
#endif

/*
 * Called on alignment exception. Attempts to fixup
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 */
int fix_alignment(struct pt_regs *regs)
{
	unsigned int instr, nb, flags, instruction = 0;
	unsigned int reg, areg;
	unsigned int dsisr;
	unsigned char __user *addr;
	unsigned long p, swiz;
	int ret, i;
	union data {
		u64 ll;
		double dd;
		unsigned char v[8];
		struct {
#ifdef __LITTLE_ENDIAN__
			int	 low32;
			unsigned hi32;
#else
			unsigned hi32;
			int	 low32;
#endif
		} x32;
		struct {
#ifdef __LITTLE_ENDIAN__
			short	      low16;
			unsigned char hi48[6];
#else
			unsigned char hi48[6];
			short	      low16;
#endif
		} x16;
	} data;

	/*
	 * We require a complete register set; if not, our assembly
	 * is broken.
	 */
	CHECK_FULL_REGS(regs);

	dsisr = regs->dsisr;

	/* Some processors don't provide us with a DSISR we can use here,
	 * let's make one up from the instruction
	 */
	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
		unsigned long pc = regs->nip;

		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
			pc ^= 4;
		if (unlikely(__get_user_inatomic(instr,
						 (unsigned int __user *)pc)))
			return -EFAULT;
		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
			instr = cpu_to_le32(instr);
		dsisr = make_dsisr(instr);
		instruction = instr;
	}

	/* extract the operation and registers from the dsisr */
	reg = (dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = dsisr & 0x1f;		/* register to update */

#ifdef CONFIG_SPE
	if ((instr >> 26) == 0x4) {
		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif
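
	/* Rebuild the 7-bit aligninfo[] table index from the DSISR fields */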
	instr = (dsisr >> 10) & 0x7f;
	instr |= (dsisr >> 13) & 0x60;

	/* Lookup the operation in our table */
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;

	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
		nb = 8;
		flags = LD+SW;
	} else if (IS_XFORM(instruction) &&
		   ((instruction >> 1) & 0x3ff) == 660) {
		nb = 8;
		flags = ST+SW;
	}

	/* Byteswap little endian loads and stores */
	swiz = 0;
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		flags ^= SW;
#ifdef __BIG_ENDIAN__
		/*
		 * So-called "PowerPC little endian" mode works by
		 * swizzling addresses rather than by actually doing
		 * any byte-swapping.  To emulate this, we XOR each
		 * byte address with 7.  We also byte-swap, because
		 * the processor's address swizzling depends on the
		 * operand size (it xors the address with 7 for bytes,
		 * 6 for halfwords, 4 for words, 0 for doublewords) but
		 * we will xor with 7 and load/store each byte separately.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			swiz = 7;
#endif
	}

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

#ifdef CONFIG_VSX
	if ((instruction & 0xfc00003e) == 0x7c000018) {
		unsigned int elsize;

		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
		reg |= (instruction & 0x1) << 5;
		/* Simple inline decoder instead of a table */
		/* VSX has only 8 and 16 byte memory accesses */
		nb = 8;
		if (instruction & 0x200)
			nb = 16;

		/*
		 * Vector stores in little-endian mode swap individual
		 * elements, so process them separately.
		 */
		elsize = 4;
		if (instruction & 0x80)
			elsize = 8;

		flags = 0;
		if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
			flags |= SW;
		if (instruction & 0x100)
			flags |= ST;
		if (instruction & 0x040)
			flags |= U;
		/* splat load needs a special decoder */
		if ((instruction & 0x400) == 0) {
			flags |= SPLT;
			nb = 8;
		}
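
		/*
		 * Worked example (assuming XO 844 is lxvd2x): the tests
		 * above give nb = 16 and elsize = 8, i.e. a 16-byte load
		 * of two doubleword elements, with SW ORed in only when
		 * the MSR endianness differs from the kernel's.
		 */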
		PPC_WARN_ALIGNMENT(vsx, regs);
		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
	}
#endif

	/*
	 * ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
	 * check.
	 *
	 * Send a SIGBUS to the process that caused the fault.
	 *
	 * We do not emulate these because paste may contain additional metadata
	 * when pasting to a co-processor. Furthermore, paste_last is the
	 * synchronisation point for preceding copy/paste sequences.
	 */
	if ((instruction & 0xfc0006fe) == PPC_INST_COPY)
		return -EIO;

	/* A size of 0 indicates an instruction we don't support, with
	 * the exception of DCBZ which is handled as a special case here
	 */
	if (instr == DCBZ) {
		PPC_WARN_ALIGNMENT(dcbz, regs);
		return emulate_dcbz(regs, addr);
	}
	if (unlikely(nb == 0))
		return 0;

	/* Load/Store Multiple instructions are handled in their own
	 * function
	 */
	if (flags & M) {
		PPC_WARN_ALIGNMENT(multiple, regs);
		return emulate_multiple(regs, addr, reg, nb,
					flags, instr, swiz);
	}

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* Force the fprs into the save area so we can reference them */
	if (flags & F) {
		/* userland only */
		if (unlikely(!user_mode(regs)))
			return 0;
		flush_fp_to_thread(current);
	}

	if (nb == 16) {
		if (flags & F) {
			/* Special case for 16-byte FP loads and stores */
			PPC_WARN_ALIGNMENT(fp_pair, regs);
			return emulate_fp_pair(addr, reg, flags);
		} else {
#ifdef CONFIG_PPC64
			/* Special case for 16-byte loads and stores */
			PPC_WARN_ALIGNMENT(lq_stq, regs);
			return emulate_lq_stq(regs, addr, reg, flags);
#else
			return 0;
#endif
		}
	}

	PPC_WARN_ALIGNMENT(unaligned, regs);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (!(flags & ST)) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		data.ll = 0;
		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __get_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;

	} else if (flags & F) {
		data.ll = current->thread.TS_FPR(reg);
		if (flags & S) {
			/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
			preempt_disable();
			enable_kernel_fp();
			cvt_df(&data.dd, (float *)&data.x32.low32);
			disable_kernel_fp();
			preempt_enable();
#else
			return 0;
#endif
		}
	} else
		data.ll = regs->gpr[reg];

	if (flags & SW) {
		switch (nb) {
		case 8:
			data.ll = swab64(data.ll);
			break;
		case 4:
			data.x32.low32 = swab32(data.x32.low32);
			break;
		case 2:
			data.x16.low16 = swab16(data.x16.low16);
			break;
		}
	}

	/* Perform other misc operations like sign extension
	 * or floating point single precision conversion
	 */
	switch (flags & ~(U|SW)) {
	case LD+SE:	/* sign extending integer loads */
	case LD+F+SE:	/* sign extend for lfiwax */
		if (nb == 2)
			data.ll = data.x16.low16;
		else	/* nb must be 4 */
			data.ll = data.x32.low32;
		break;

	/* Single-precision FP load requires conversion... */
	case LD+F+S:
#ifdef CONFIG_PPC_FPU
		preempt_disable();
		enable_kernel_fp();
		cvt_fd((float *)&data.x32.low32, &data.dd);
		disable_kernel_fp();
		preempt_enable();
#else
		return 0;
#endif
		break;
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __put_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;
	} else if (flags & F)
		current->thread.TS_FPR(reg) = data.ll;
	else
		regs->gpr[reg] = data.ll;

	/* Update RA as needed */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}