// SPDX-License-Identifier: GPL-2.0-or-later
powerpc/lib/xor_vmx: Ensure no altivec code executes before enable_kernel_altivec()
The xor_vmx.c file is used for the RAID5 xor operations. In these functions,
altivec is enabled to run the operation and then disabled.
The code uses enable_kernel_altivec() around the core of the algorithm; however,
the whole file is built with -maltivec, so the compiler is within its rights to
generate altivec code anywhere. This has been seen at least once in the wild:
0:mon> di $xor_altivec_2
c0000000000b97d0 3c4c01d9 addis r2,r12,473
c0000000000b97d4 3842db30 addi r2,r2,-9424
c0000000000b97d8 7c0802a6 mflr r0
c0000000000b97dc f8010010 std r0,16(r1)
c0000000000b97e0 60000000 nop
c0000000000b97e4 7c0802a6 mflr r0
c0000000000b97e8 faa1ffa8 std r21,-88(r1)
...
c0000000000b981c f821ff41 stdu r1,-192(r1)
c0000000000b9820 7f8101ce stvx v28,r1,r0 <-- POP
c0000000000b9824 38000030 li r0,48
c0000000000b9828 7fa101ce stvx v29,r1,r0
...
c0000000000b984c 4bf6a06d bl c0000000000238b8 # enable_kernel_altivec
This patch splits the non-altivec code into xor_vmx_glue.c, which calls the
altivec functions in xor_vmx.c. By compiling xor_vmx_glue.c without
-maltivec we can guarantee that altivec instructions will not be executed
outside of the enable/disable block.
Signed-off-by: Matt Brown <matthew.brown.dev@gmail.com>
[mpe: Rework change log and include disassembly]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
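
The glue file below calls __xor_altivec_* helpers that stay behind in the
-maltivec-built xor_vmx.c. The xor_vmx.h header is not part of this excerpt;
as a minimal sketch, inferred from the calls below rather than taken from the
real header, it would need declarations along these lines:

void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in);
void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in, unsigned long *v3_in);
void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in, unsigned long *v3_in,
		     unsigned long *v4_in);
void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
		     unsigned long *v2_in, unsigned long *v3_in,
		     unsigned long *v4_in, unsigned long *v5_in);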
/*
* Altivec XOR operations
*
 * Copyright 2017 IBM Corp.
*/
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/switch_to.h>
#include <asm/xor_altivec.h>
# include "xor_vmx.h"
void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_2(bytes, v1_in, v2_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_2);

void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_3(bytes, v1_in, v2_in, v3_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_3);

void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in,
		   unsigned long *v4_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_4);

void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in,
		   unsigned long *v4_in, unsigned long *v5_in)
{
	preempt_disable();
	enable_kernel_altivec();
	__xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);
	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_5);
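
For context, the RAID xor code reaches these routines through a
struct xor_block_template (declared in include/linux/raid/xor.h). The actual
powerpc wiring is outside this excerpt; purely as an illustrative sketch, with
a hypothetical template name, the exported functions could be plugged in as:

#include <linux/raid/xor.h>
#include <asm/xor_altivec.h>

/* Illustrative only: template name and placement are hypothetical. */
static struct xor_block_template xor_block_altivec_example = {
	.name = "altivec",
	.do_2 = xor_altivec_2,
	.do_3 = xor_altivec_3,
	.do_4 = xor_altivec_4,
	.do_5 = xor_altivec_5,
};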