Merge remote-tracking branch 'arm64/for-next/neon-softirqs-disabled' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 3284cd638b
@@ -700,7 +700,7 @@ AES_FUNC_START(aes_mac_update)
 	cbz	w5, .Lmacout
 	encrypt_block	v0, w2, x1, x7, w8
 	st1	{v0.16b}, [x4]			/* return dg */
-	cond_yield	.Lmacout, x7
+	cond_yield	.Lmacout, x7, x8
 	b	.Lmacloop4x
 .Lmac1x:
 	add	w3, w3, #4
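Every crypto call site in this series changes the same way: cond_yield now takes a second scratch register, which the macro clobbers to compute the per-CPU address of the softirq-pending flag. Substituting this call's arguments into the new macro (defined later in this diff) gives roughly the following expansion. This is a sketch, not literal assembler output: .Lnoyield stands in for the macro's \@-generated local label.

	// cond_yield .Lmacout, x7, x8 expands to approximately:
	get_current_task x7
	ldr	x7, [x7, #TSK_TI_PREEMPT]
	tbnz	x7, #SOFTIRQ_SHIFT, .Lnoyield	// serving a softirq: never yield
#ifdef CONFIG_PREEMPTION
	sub	x7, x7, #PREEMPT_DISABLE_OFFSET
	cbz	x7, .Lmacout			// preemptible and need_resched set: yield
#endif
	adr_l	x7, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
	this_cpu_offset x8
	ldr	w7, [x7, x8]			// "w\tmp" resolves to w7 via the wxN aliases
	cbnz	w7, .Lmacout			// softirq pending in task context: yield
.Lnoyield: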
@@ -121,7 +121,7 @@ CPU_LE(	rev32	v11.16b, v11.16b	)
 	add	dgav.4s, dgav.4s, dg0v.4s

 	cbz	w2, 2f
-	cond_yield	3f, x5
+	cond_yield	3f, x5, x6
 	b	0b

 	/*
@@ -129,7 +129,7 @@ CPU_LE(	rev32	v19.16b, v19.16b	)

 	/* handled all input blocks? */
 	cbz	w2, 2f
-	cond_yield	3f, x5
+	cond_yield	3f, x5, x6
 	b	0b

 	/*
@@ -184,11 +184,11 @@ SYM_FUNC_START(sha3_ce_transform)
 	eor	v0.16b, v0.16b, v31.16b

 	cbnz	w8, 3b
-	cond_yield	3f, x8
+	cond_yield	4f, x8, x9
 	cbnz	w2, 0b

 	/* save state */
-3:	st1	{ v0.1d- v3.1d}, [x0], #32
+4:	st1	{ v0.1d- v3.1d}, [x0], #32
 	st1	{ v4.1d- v7.1d}, [x0], #32
 	st1	{ v8.1d-v11.1d}, [x0], #32
 	st1	{v12.1d-v15.1d}, [x0], #32
@@ -195,7 +195,7 @@ CPU_LE(	rev64	v19.16b, v19.16b	)
 	add	v10.2d, v10.2d, v2.2d
 	add	v11.2d, v11.2d, v3.2d

-	cond_yield	3f, x4
+	cond_yield	3f, x4, x5
 	/* handled all input blocks? */
 	cbnz	w2, 0b

@@ -15,6 +15,7 @@
 #include <asm-generic/export.h>

 #include <asm/asm-offsets.h>
+#include <asm/alternative.h>
 #include <asm/asm-bug.h>
 #include <asm/cpufeature.h>
 #include <asm/cputype.h>
@@ -24,6 +25,14 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>

+/*
+ * Provide a wxN alias for each wN register so that we can paste an xN
+ * reference after a 'w' to obtain the 32-bit version.
+ */
+	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+	wx\n	.req	w\n
+	.endr
+
 	.macro save_and_disable_daif, flags
 	mrs	\flags, daif
 	msr	daifset, #0xf
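The wxN aliases exist because GNU as macro substitution is textual: a macro that receives a full register name such as x5 in \reg can form the token w\reg, which expands to wx5, and the alias then resolves that to the 32-bit w5. The new cond_yield macro below relies on this for its "ldr w\tmp" and "cbnz w\tmp" instructions. A minimal illustration (the macro name here is invented for the example):

	// Hypothetical demo of the pasting trick:
	.macro	zero_low32, reg		// \reg is a full xN name, e.g. x5
	mov	w\reg, wzr		// expands to "mov wx5, wzr" -> "mov w5, wzr"
	.endm

	zero_low32	x5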
@@ -713,90 +722,33 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	set_sctlr	sctlr_el2, \reg
 .endm

-/*
- * Check whether to yield to another runnable task from kernel mode NEON code
- * (which runs with preemption disabled).
- *
- * if_will_cond_yield_neon
- * // pre-yield patchup code
- * do_cond_yield_neon
- * // post-yield patchup code
- * endif_yield_neon <label>
- *
- * where <label> is optional, and marks the point where execution will resume
- * after a yield has been performed. If omitted, execution resumes right after
- * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if
- * CONFIG_PREEMPTION is not defined.
- *
- * As a convenience, in the case where no patchup code is required, the above
- * sequence may be abbreviated to
- *
- * cond_yield_neon <label>
- *
- * Note that the patchup code does not support assembler directives that change
- * the output section, any use of such directives is undefined.
- *
- * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1 and a reschedule is also
- *   needed. If so, calling of preempt_enable() in kernel_neon_end() will
- *   trigger a reschedule. If it is not the case, yielding is pointless.
- * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
- *   code.
- *
- * This macro sequence may clobber all CPU state that is not guaranteed by the
- * AAPCS to be preserved across an ordinary function call.
- */
-	.macro		cond_yield_neon, lbl
-	if_will_cond_yield_neon
-	do_cond_yield_neon
-	endif_yield_neon	\lbl
-	.endm
-
-	.macro		if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPTION
-	get_current_task	x0
-	ldr		x0, [x0, #TSK_TI_PREEMPT]
-	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
-	cbz		x0, .Lyield_\@
-	/* fall through to endif_yield_neon */
-	.subsection	1
-.Lyield_\@ :
-#else
-	.section	".discard.cond_yield_neon", "ax"
-#endif
-	.endm
-
-	.macro		do_cond_yield_neon
-	bl		kernel_neon_end
-	bl		kernel_neon_begin
-	.endm
-
-	.macro		endif_yield_neon, lbl
-	.ifnb		\lbl
-	b		\lbl
-	.else
-	b		.Lyield_out_\@
-	.endif
-	.previous
-.Lyield_out_\@ :
-	.endm
-
 /*
- * Check whether preempt-disabled code should yield as soon as it
- * is able. This is the case if re-enabling preemption a single
- * time results in a preempt count of zero, and the TIF_NEED_RESCHED
- * flag is set. (Note that the latter is stored negated in the
- * top word of the thread_info::preempt_count field)
+ * Check whether preempt/bh-disabled asm code should yield as soon as
+ * it is able. This is the case if we are currently running in task
+ * context, and either a softirq is pending, or the TIF_NEED_RESCHED
+ * flag is set and re-enabling preemption a single time would result in
+ * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
+ * stored negated in the top word of the thread_info::preempt_count
+ * field)
  */
-	.macro		cond_yield, lbl:req, tmp:req
-#ifdef CONFIG_PREEMPTION
+	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
 	get_current_task \tmp
 	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
+	/*
+	 * If we are serving a softirq, there is no point in yielding: the
+	 * softirq will not be preempted no matter what we do, so we should
+	 * run to completion as quickly as we can.
+	 */
+	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
+#ifdef CONFIG_PREEMPTION
 	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
 	cbz		\tmp, \lbl
 #endif
+	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
+	this_cpu_offset	\tmp2
+	ldr		w\tmp, [\tmp, \tmp2]
+	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
+.Lnoyield_\@:
 	.endm

 /*
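Rewritten as C, the new yield test reads roughly as follows. This is an illustrative model only (should_yield and its explicit parameters are invented for the sketch): on arm64 the thread_info::preempt_count field is read as a 64-bit value whose bottom word is the preempt count, with the softirq count starting at bit SOFTIRQ_SHIFT, and whose top word holds TIF_NEED_RESCHED negated, so the whole 64-bit value equals PREEMPT_DISABLE_OFFSET exactly when one preempt_enable() would trigger a reschedule.

	#include <stdbool.h>
	#include <stdint.h>

	#define SOFTIRQ_SHIFT		8	/* generic preempt_count layout */
	#define PREEMPT_DISABLE_OFFSET	1	/* with CONFIG_PREEMPT_COUNT */

	static bool should_yield(uint64_t preempt_count, uint32_t softirq_pending)
	{
		/* Serving a softirq: it cannot be preempted, run to completion. */
		if (preempt_count & (1ULL << SOFTIRQ_SHIFT))
			return false;

	#ifdef CONFIG_PREEMPTION
		/* The count would hit zero and TIF_NEED_RESCHED is set (the flag
		 * is stored negated in the top word, so the full 64-bit
		 * subtraction sees zero only in that case). */
		if (preempt_count - PREEMPT_DISABLE_OFFSET == 0)
			return true;
	#endif
		/* Task context with a softirq pending: yield so it can run. */
		return softirq_pending != 0;
	}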
@@ -95,6 +95,8 @@ int main(void)
   DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
   BLANK();
   DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
+  DEFINE(SOFTIRQ_SHIFT,	SOFTIRQ_SHIFT);
+  DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
   DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
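These DEFINE() entries are how the assembler macro above gets numeric values for SOFTIRQ_SHIFT and the __softirq_pending offset: .S files cannot evaluate C expressions like offsetof(), so asm-offsets.c is compiled to assembly and the constants are scraped into a generated header. The underlying trick, per include/linux/kbuild.h (quoted from memory, so treat the exact body as approximate):

	/* Embed "->SYM value" markers in the compiler's assembly output;
	 * a sed script in the build turns each one into a "#define SYM value"
	 * line in the generated asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))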
@@ -180,7 +180,7 @@ static void __get_cpu_fpsimd_context(void)
  */
 static void get_cpu_fpsimd_context(void)
 {
-	preempt_disable();
+	local_bh_disable();
 	__get_cpu_fpsimd_context();
 }

@@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void)
 static void put_cpu_fpsimd_context(void)
 {
 	__put_cpu_fpsimd_context();
-	preempt_enable();
+	local_bh_enable();
 }

 static bool have_cpu_fpsimd_context(void)
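This substitution is the point of the whole branch: local_bh_disable() also disables preemption on non-PREEMPT_RT kernels, so FPSIMD/NEON state is now protected against softirqs (which may themselves use the NEON unit) as well as against task preemption, and delaying softirqs this way is in turn why cond_yield treats a pending softirq as a reason to yield. The resulting usage pattern, with a caller name invented for illustration:

	/* Illustrative caller: between get and put, bottom halves and,
	 * implicitly, preemption are disabled, so this CPU's FPSIMD state
	 * cannot be touched underneath us by a softirq or a context switch. */
	static void frob_fpsimd_state(void)
	{
		get_cpu_fpsimd_context();	/* local_bh_disable() + claim */
		/* ... operate on this CPU's FPSIMD/NEON registers ... */
		put_cpu_fpsimd_context();	/* release + local_bh_enable() */
	}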