ARM: 9282/1: vfp: Manipulate task VFP state with softirqs disabled
In a subsequent patch, we will relax the kernel mode NEON policy, and permit kernel mode NEON to be used not only from task context, as is permitted today, but also from softirq context.

Given that softirqs may trigger over the back of any IRQ unless they are explicitly disabled, we need to address the resulting races in the VFP state handling, by disabling softirq processing in two distinct but related cases:

- kernel mode NEON will leave the FPU disabled after it completes, so any kernel code sequence that enables the FPU and subsequently accesses its registers needs to disable softirqs until it completes;

- kernel_neon_begin() will preserve the userland VFP state in memory, and if it interrupts the ordinary VFP state preserve sequence, the latter will resume execution with the VFP registers corrupted, and happily continue saving them to memory.

Given that disabling softirqs also disables preemption, we can replace the existing preempt_disable/enable occurrences in the VFP state handling asm code with new macros that dis/enable softirqs instead. In the VFP state handling C code, add local_bh_disable/enable() calls in those places where the VFP state is preserved.

One thing to keep in mind is that, once we allow NEON use in softirq context, the result of any such interruption is that the FPEXC_EN bit in the FPEXC register will be cleared, and vfp_current_hw_state[cpu] will be NULL. This means that any sequence that [conditionally] clears FPEXC_EN and/or sets vfp_current_hw_state[cpu] to NULL does not need to run with softirqs disabled, as the result will be the same. Furthermore, the handling of THREAD_NOTIFY_SWITCH is guaranteed to run with IRQs disabled, and so it does not need protection from softirq interruptions either.

Tested-by: Martin Willi <martin@strongswan.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
This commit is contained in:
parent
368ccecd4e
commit
62b95a7b44
@ -236,22 +236,27 @@ THUMB( fpreg .req r7 )
|
|||||||
sub \tmp, \tmp, #1 @ decrement it
|
sub \tmp, \tmp, #1 @ decrement it
|
||||||
str \tmp, [\ti, #TI_PREEMPT]
|
str \tmp, [\ti, #TI_PREEMPT]
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
.macro dec_preempt_count_ti, ti, tmp
|
|
||||||
get_thread_info \ti
|
|
||||||
dec_preempt_count \ti, \tmp
|
|
||||||
.endm
|
|
||||||
#else
|
#else
|
||||||
.macro inc_preempt_count, ti, tmp
|
.macro inc_preempt_count, ti, tmp
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
.macro dec_preempt_count, ti, tmp
|
.macro dec_preempt_count, ti, tmp
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
.macro dec_preempt_count_ti, ti, tmp
|
|
||||||
.endm
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
.macro local_bh_disable, ti, tmp
|
||||||
|
ldr \tmp, [\ti, #TI_PREEMPT]
|
||||||
|
add \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
|
||||||
|
str \tmp, [\ti, #TI_PREEMPT]
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro local_bh_enable_ti, ti, tmp
|
||||||
|
get_thread_info \ti
|
||||||
|
ldr \tmp, [\ti, #TI_PREEMPT]
|
||||||
|
sub \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
|
||||||
|
str \tmp, [\ti, #TI_PREEMPT]
|
||||||
|
.endm
|
||||||
|
|
||||||
#define USERL(l, x...) \
|
#define USERL(l, x...) \
|
||||||
9999: x; \
|
9999: x; \
|
||||||
.pushsection __ex_table,"a"; \
|
.pushsection __ex_table,"a"; \
|
||||||
|
@ -56,6 +56,7 @@ int main(void)
|
|||||||
DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
|
DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
DEFINE(SOFTIRQ_DISABLE_OFFSET,SOFTIRQ_DISABLE_OFFSET);
|
||||||
#ifdef CONFIG_ARM_THUMBEE
|
#ifdef CONFIG_ARM_THUMBEE
|
||||||
DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
|
DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
|
||||||
#endif
|
#endif
|
||||||
|
@ -22,7 +22,7 @@
|
|||||||
@ IRQs enabled.
|
@ IRQs enabled.
|
||||||
@
|
@
|
||||||
ENTRY(do_vfp)
|
ENTRY(do_vfp)
|
||||||
inc_preempt_count r10, r4
|
local_bh_disable r10, r4
|
||||||
ldr r4, .LCvfp
|
ldr r4, .LCvfp
|
||||||
ldr r11, [r10, #TI_CPU] @ CPU number
|
ldr r11, [r10, #TI_CPU] @ CPU number
|
||||||
add r10, r10, #TI_VFPSTATE @ r10 = workspace
|
add r10, r10, #TI_VFPSTATE @ r10 = workspace
|
||||||
@ -30,7 +30,7 @@ ENTRY(do_vfp)
|
|||||||
ENDPROC(do_vfp)
|
ENDPROC(do_vfp)
|
||||||
|
|
||||||
ENTRY(vfp_null_entry)
|
ENTRY(vfp_null_entry)
|
||||||
dec_preempt_count_ti r10, r4
|
local_bh_enable_ti r10, r4
|
||||||
ret lr
|
ret lr
|
||||||
ENDPROC(vfp_null_entry)
|
ENDPROC(vfp_null_entry)
|
||||||
|
|
||||||
|
@ -175,7 +175,7 @@ vfp_hw_state_valid:
|
|||||||
@ else it's one 32-bit instruction, so
|
@ else it's one 32-bit instruction, so
|
||||||
@ always subtract 4 from the following
|
@ always subtract 4 from the following
|
||||||
@ instruction address.
|
@ instruction address.
|
||||||
dec_preempt_count_ti r10, r4
|
local_bh_enable_ti r10, r4
|
||||||
ret r9 @ we think we have handled things
|
ret r9 @ we think we have handled things
|
||||||
|
|
||||||
|
|
||||||
@ -200,7 +200,7 @@ skip:
|
|||||||
@ not recognised by VFP
|
@ not recognised by VFP
|
||||||
|
|
||||||
DBGSTR "not VFP"
|
DBGSTR "not VFP"
|
||||||
dec_preempt_count_ti r10, r4
|
local_bh_enable_ti r10, r4
|
||||||
ret lr
|
ret lr
|
||||||
|
|
||||||
process_exception:
|
process_exception:
|
||||||
|
@ -416,7 +416,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
|
|||||||
if (exceptions)
|
if (exceptions)
|
||||||
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
|
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
|
||||||
exit:
|
exit:
|
||||||
preempt_enable();
|
local_bh_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void vfp_enable(void *unused)
|
static void vfp_enable(void *unused)
|
||||||
@ -517,6 +517,8 @@ void vfp_sync_hwstate(struct thread_info *thread)
|
|||||||
{
|
{
|
||||||
unsigned int cpu = get_cpu();
|
unsigned int cpu = get_cpu();
|
||||||
|
|
||||||
|
local_bh_disable();
|
||||||
|
|
||||||
if (vfp_state_in_hw(cpu, thread)) {
|
if (vfp_state_in_hw(cpu, thread)) {
|
||||||
u32 fpexc = fmrx(FPEXC);
|
u32 fpexc = fmrx(FPEXC);
|
||||||
|
|
||||||
@ -528,6 +530,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
|
|||||||
fmxr(FPEXC, fpexc);
|
fmxr(FPEXC, fpexc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
local_bh_enable();
|
||||||
put_cpu();
|
put_cpu();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -717,6 +720,8 @@ void kernel_neon_begin(void)
|
|||||||
unsigned int cpu;
|
unsigned int cpu;
|
||||||
u32 fpexc;
|
u32 fpexc;
|
||||||
|
|
||||||
|
local_bh_disable();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Kernel mode NEON is only allowed outside of interrupt context
|
* Kernel mode NEON is only allowed outside of interrupt context
|
||||||
* with preemption disabled. This will make sure that the kernel
|
* with preemption disabled. This will make sure that the kernel
|
||||||
@ -739,6 +744,7 @@ void kernel_neon_begin(void)
|
|||||||
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
|
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
|
||||||
#endif
|
#endif
|
||||||
vfp_current_hw_state[cpu] = NULL;
|
vfp_current_hw_state[cpu] = NULL;
|
||||||
|
local_bh_enable();
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(kernel_neon_begin);
|
EXPORT_SYMBOL(kernel_neon_begin);
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user