// SPDX-License-Identifier: GPL-2.0-or-later
/* align.c - handle alignment exceptions for the Power PC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *   PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *   PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *   64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *   <benh@kernel.crashing.org>
 *   Merge ppc32 and ppc64 implementations
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
#include <asm/inst.h>
struct aligninfo {
unsigned char len ;
unsigned char flags ;
} ;
# define INVALID { 0, 0 }
2006-06-07 10:14:40 +04:00
/* Bits in the flags field */
# define LD 0 /* load */
# define ST 1 /* store */
2007-08-10 08:07:38 +04:00
# define SE 2 /* sign-extend value, or FP ld/st as word */
2006-06-07 10:14:40 +04:00
# define SW 0x20 /* byte swap */
2007-08-25 01:42:53 +04:00
# define E4 0x40 /* SPE endianness is word */
# define E8 0x80 /* SPE endianness is double word */
2014-03-28 10:01:23 +04:00
2007-08-25 01:42:53 +04:00
# ifdef CONFIG_SPE
static struct aligninfo spe_aligninfo [ 32 ] = {
{ 8 , LD + E8 } , /* 0 00 00: evldd[x] */
{ 8 , LD + E4 } , /* 0 00 01: evldw[x] */
{ 8 , LD } , /* 0 00 10: evldh[x] */
INVALID , /* 0 00 11 */
{ 2 , LD } , /* 0 01 00: evlhhesplat[x] */
INVALID , /* 0 01 01 */
{ 2 , LD } , /* 0 01 10: evlhhousplat[x] */
{ 2 , LD + SE } , /* 0 01 11: evlhhossplat[x] */
{ 4 , LD } , /* 0 10 00: evlwhe[x] */
INVALID , /* 0 10 01 */
{ 4 , LD } , /* 0 10 10: evlwhou[x] */
{ 4 , LD + SE } , /* 0 10 11: evlwhos[x] */
{ 4 , LD + E4 } , /* 0 11 00: evlwwsplat[x] */
INVALID , /* 0 11 01 */
{ 4 , LD } , /* 0 11 10: evlwhsplat[x] */
INVALID , /* 0 11 11 */
{ 8 , ST + E8 } , /* 1 00 00: evstdd[x] */
{ 8 , ST + E4 } , /* 1 00 01: evstdw[x] */
{ 8 , ST } , /* 1 00 10: evstdh[x] */
INVALID , /* 1 00 11 */
INVALID , /* 1 01 00 */
INVALID , /* 1 01 01 */
INVALID , /* 1 01 10 */
INVALID , /* 1 01 11 */
{ 4 , ST } , /* 1 10 00: evstwhe[x] */
INVALID , /* 1 10 01 */
{ 4 , ST } , /* 1 10 10: evstwho[x] */
INVALID , /* 1 10 11 */
{ 4 , ST + E4 } , /* 1 11 00: evstwwe[x] */
INVALID , /* 1 11 01 */
{ 4 , ST + E4 } , /* 1 11 10: evstwwo[x] */
INVALID , /* 1 11 11 */
} ;
# define EVLDD 0x00
# define EVLDW 0x01
# define EVLDH 0x02
# define EVLHHESPLAT 0x04
# define EVLHHOUSPLAT 0x06
# define EVLHHOSSPLAT 0x07
# define EVLWHE 0x08
# define EVLWHOU 0x0A
# define EVLWHOS 0x0B
# define EVLWWSPLAT 0x0C
# define EVLWHSPLAT 0x0E
# define EVSTDD 0x10
# define EVSTDW 0x11
# define EVSTDH 0x12
# define EVSTWHE 0x18
# define EVSTWHO 0x1A
# define EVSTWWE 0x1C
# define EVSTWWO 0x1E
/*
* Emulate SPE loads and stores .
* Only Book - E has these instructions , and it does true little - endian ,
* so we don ' t need the address swizzling .
*/
static int emulate_spe ( struct pt_regs * regs , unsigned int reg ,
2020-05-06 06:40:31 +03:00
struct ppc_inst ppc_instr )
2007-08-25 01:42:53 +04:00
{
union {
u64 ll ;
u32 w [ 2 ] ;
u16 h [ 4 ] ;
u8 v [ 8 ] ;
} data , temp ;
unsigned char __user * p , * addr ;
unsigned long * evr = & current - > thread . evr [ reg ] ;
2020-05-06 06:40:31 +03:00
unsigned int nb , flags , instr ;
2007-08-25 01:42:53 +04:00
2020-05-06 06:40:31 +03:00
instr = ppc_inst_val ( ppc_instr ) ;
2007-08-25 01:42:53 +04:00
instr = ( instr > > 1 ) & 0x1f ;
/* DAR has the operand effective address */
addr = ( unsigned char __user * ) regs - > dar ;
nb = spe_aligninfo [ instr ] . len ;
flags = spe_aligninfo [ instr ] . flags ;
/* userland only */
if ( unlikely ( ! user_mode ( regs ) ) )
return 0 ;
flush_spe_to_thread ( current ) ;
/* If we are loading, get the data from user space, else
* get it from register values
*/
if ( flags & ST ) {
data . ll = 0 ;
switch ( instr ) {
case EVSTDD :
case EVSTDW :
case EVSTDH :
data . w [ 0 ] = * evr ;
data . w [ 1 ] = regs - > gpr [ reg ] ;
break ;
case EVSTWHE :
data . h [ 2 ] = * evr > > 16 ;
data . h [ 3 ] = regs - > gpr [ reg ] > > 16 ;
break ;
case EVSTWHO :
data . h [ 2 ] = * evr & 0xffff ;
data . h [ 3 ] = regs - > gpr [ reg ] & 0xffff ;
break ;
case EVSTWWE :
data . w [ 1 ] = * evr ;
break ;
case EVSTWWO :
data . w [ 1 ] = regs - > gpr [ reg ] ;
break ;
default :
return - EINVAL ;
}
} else {
temp . ll = data . ll = 0 ;
p = addr ;
2021-03-12 16:25:11 +03:00
if ( ! user_read_access_begin ( addr , nb ) )
return - EFAULT ;
2007-08-25 01:42:53 +04:00
switch ( nb ) {
case 8 :
2021-03-12 16:25:11 +03:00
unsafe_get_user ( temp . v [ 0 ] , p + + , Efault_read ) ;
unsafe_get_user ( temp . v [ 1 ] , p + + , Efault_read ) ;
unsafe_get_user ( temp . v [ 2 ] , p + + , Efault_read ) ;
unsafe_get_user ( temp . v [ 3 ] , p + + , Efault_read ) ;
2020-07-28 01:42:01 +03:00
fallthrough ;
2007-08-25 01:42:53 +04:00
case 4 :
2021-03-12 16:25:11 +03:00
unsafe_get_user ( temp . v [ 4 ] , p + + , Efault_read ) ;
unsafe_get_user ( temp . v [ 5 ] , p + + , Efault_read ) ;
2020-07-28 01:42:01 +03:00
fallthrough ;
2007-08-25 01:42:53 +04:00
case 2 :
2021-03-12 16:25:11 +03:00
unsafe_get_user ( temp . v [ 6 ] , p + + , Efault_read ) ;
unsafe_get_user ( temp . v [ 7 ] , p + + , Efault_read ) ;
2007-08-25 01:42:53 +04:00
}
2021-03-12 16:25:11 +03:00
user_read_access_end ( ) ;
2007-08-25 01:42:53 +04:00
switch ( instr ) {
case EVLDD :
case EVLDW :
case EVLDH :
data . ll = temp . ll ;
break ;
case EVLHHESPLAT :
data . h [ 0 ] = temp . h [ 3 ] ;
data . h [ 2 ] = temp . h [ 3 ] ;
break ;
case EVLHHOUSPLAT :
case EVLHHOSSPLAT :
data . h [ 1 ] = temp . h [ 3 ] ;
data . h [ 3 ] = temp . h [ 3 ] ;
break ;
case EVLWHE :
data . h [ 0 ] = temp . h [ 2 ] ;
data . h [ 2 ] = temp . h [ 3 ] ;
break ;
case EVLWHOU :
case EVLWHOS :
data . h [ 1 ] = temp . h [ 2 ] ;
data . h [ 3 ] = temp . h [ 3 ] ;
break ;
case EVLWWSPLAT :
data . w [ 0 ] = temp . w [ 1 ] ;
data . w [ 1 ] = temp . w [ 1 ] ;
break ;
case EVLWHSPLAT :
data . h [ 0 ] = temp . h [ 2 ] ;
data . h [ 1 ] = temp . h [ 2 ] ;
data . h [ 2 ] = temp . h [ 3 ] ;
data . h [ 3 ] = temp . h [ 3 ] ;
break ;
default :
return - EINVAL ;
}
}
if ( flags & SW ) {
switch ( flags & 0xf0 ) {
case E8 :
2013-09-23 06:04:46 +04:00
data . ll = swab64 ( data . ll ) ;
2007-08-25 01:42:53 +04:00
break ;
case E4 :
2013-09-23 06:04:46 +04:00
data . w [ 0 ] = swab32 ( data . w [ 0 ] ) ;
data . w [ 1 ] = swab32 ( data . w [ 1 ] ) ;
2007-08-25 01:42:53 +04:00
break ;
/* Its half word endian */
default :
2013-09-23 06:04:46 +04:00
data . h [ 0 ] = swab16 ( data . h [ 0 ] ) ;
data . h [ 1 ] = swab16 ( data . h [ 1 ] ) ;
data . h [ 2 ] = swab16 ( data . h [ 2 ] ) ;
data . h [ 3 ] = swab16 ( data . h [ 3 ] ) ;
2007-08-25 01:42:53 +04:00
break ;
}
}
if ( flags & SE ) {
data . w [ 0 ] = ( s16 ) data . h [ 1 ] ;
data . w [ 1 ] = ( s16 ) data . h [ 3 ] ;
}
/* Store result to memory or update registers */
if ( flags & ST ) {
p = addr ;
2021-03-12 16:25:11 +03:00
if ( ! user_write_access_begin ( addr , nb ) )
return - EFAULT ;
2007-08-25 01:42:53 +04:00
switch ( nb ) {
case 8 :
2021-03-12 16:25:11 +03:00
unsafe_put_user ( data . v [ 0 ] , p + + , Efault_write ) ;
unsafe_put_user ( data . v [ 1 ] , p + + , Efault_write ) ;
unsafe_put_user ( data . v [ 2 ] , p + + , Efault_write ) ;
unsafe_put_user ( data . v [ 3 ] , p + + , Efault_write ) ;
2020-07-28 01:42:01 +03:00
fallthrough ;
2007-08-25 01:42:53 +04:00
case 4 :
2021-03-12 16:25:11 +03:00
unsafe_put_user ( data . v [ 4 ] , p + + , Efault_write ) ;
unsafe_put_user ( data . v [ 5 ] , p + + , Efault_write ) ;
2020-07-28 01:42:01 +03:00
fallthrough ;
2007-08-25 01:42:53 +04:00
case 2 :
2021-03-12 16:25:11 +03:00
unsafe_put_user ( data . v [ 6 ] , p + + , Efault_write ) ;
unsafe_put_user ( data . v [ 7 ] , p + + , Efault_write ) ;
2007-08-25 01:42:53 +04:00
}
2021-03-12 16:25:11 +03:00
user_write_access_end ( ) ;
2007-08-25 01:42:53 +04:00
} else {
* evr = data . w [ 0 ] ;
regs - > gpr [ reg ] = data . w [ 1 ] ;
}
return 1 ;
2021-03-12 16:25:11 +03:00
Efault_read :
user_read_access_end ( ) ;
return - EFAULT ;
Efault_write :
user_write_access_end ( ) ;
return - EFAULT ;
2007-08-25 01:42:53 +04:00
}
#endif /* CONFIG_SPE */
2005-11-18 06:09:41 +03:00
/*
* Called on alignment exception . Attempts to fixup
*
* Return 1 on success
* Return 0 if unable to handle the interrupt
* Return - EFAULT if data address is bad
2017-08-30 07:12:40 +03:00
* Other negative return values indicate that the instruction can ' t
* be emulated , and the process should be given a SIGBUS .
2005-11-18 06:09:41 +03:00
*/
int fix_alignment ( struct pt_regs * regs )
2005-04-17 02:20:36 +04:00
{
2020-05-06 06:40:31 +03:00
struct ppc_inst instr ;
2017-08-30 07:12:40 +03:00
struct instruction_op op ;
int r , type ;
2005-04-17 02:20:36 +04:00
2021-03-10 20:46:45 +03:00
if ( is_kernel_addr ( regs - > nip ) )
2021-04-14 16:08:42 +03:00
r = copy_inst_from_kernel_nofault ( & instr , ( void * ) regs - > nip ) ;
2021-03-10 20:46:45 +03:00
else
r = __get_user_instr ( instr , ( void __user * ) regs - > nip ) ;
if ( unlikely ( r ) )
2017-08-30 07:12:40 +03:00
return - EFAULT ;
if ( ( regs - > msr & MSR_LE ) ! = ( MSR_KERNEL & MSR_LE ) ) {
/* We don't handle PPC little-endian any more... */
if ( cpu_has_feature ( CPU_FTR_PPC_LE ) )
return - EIO ;
2020-05-06 06:40:29 +03:00
instr = ppc_inst_swab ( instr ) ;
2005-04-17 02:20:36 +04:00
}
2007-08-25 01:42:53 +04:00
# ifdef CONFIG_SPE
2020-05-06 06:40:28 +03:00
if ( ppc_inst_primary_opcode ( instr ) = = 0x4 ) {
2020-05-06 06:40:27 +03:00
int reg = ( ppc_inst_val ( instr ) > > 21 ) & 0x1f ;
2009-10-27 21:46:55 +03:00
PPC_WARN_ALIGNMENT ( spe , regs ) ;
2007-08-25 01:42:53 +04:00
return emulate_spe ( regs , reg , instr ) ;
2009-05-18 06:10:05 +04:00
}
2007-08-25 01:42:53 +04:00
# endif
2016-06-17 02:33:45 +03:00
/*
* ISA 3.0 ( such as P9 ) copy , copy_first , paste and paste_last alignment
* check .
*
* Send a SIGBUS to the process that caused the fault .
*
* We do not emulate these because paste may contain additional metadata
* when pasting to a co - processor . Furthermore , paste_last is the
* synchronisation point for preceding copy / paste sequences .
*/
2020-05-06 06:40:27 +03:00
if ( ( ppc_inst_val ( instr ) & 0xfc0006fe ) = = ( PPC_INST_COPY & 0xfc0006fe ) )
2016-06-17 02:33:45 +03:00
return - EIO ;
2017-08-30 07:12:40 +03:00
r = analyse_instr ( & op , regs , instr ) ;
if ( r < 0 )
return - EINVAL ;
2005-11-18 06:09:41 +03:00
2018-05-21 07:21:06 +03:00
type = GETTYPE ( op . type ) ;
2017-08-30 07:12:40 +03:00
if ( ! OP_IS_LOAD_STORE ( type ) ) {
2017-09-13 07:51:24 +03:00
if ( op . type ! = CACHEOP + DCBZ )
2017-08-30 07:12:40 +03:00
return - EINVAL ;
PPC_WARN_ALIGNMENT ( dcbz , regs ) ;
2021-09-16 17:52:09 +03:00
WARN_ON_ONCE ( ! user_mode ( regs ) ) ;
2017-08-30 07:12:40 +03:00
r = emulate_dcbz ( op . ea , regs ) ;
} else {
if ( type = = LARX | | type = = STCX )
return - EIO ;
PPC_WARN_ALIGNMENT ( unaligned , regs ) ;
r = emulate_loadstore ( regs , & op ) ;
2005-04-17 02:20:36 +04:00
}
2005-11-18 06:09:41 +03:00
2017-08-30 07:12:40 +03:00
if ( ! r )
return 1 ;
return r ;
2005-04-17 02:20:36 +04:00
}