arm64: fpsimd: Bring cond_yield asm macro in line with new rules
We no longer disable softirqs or preemption when doing kernel mode SIMD, and
so for fully preemptible kernels, there is no longer a need to do any explicit
yielding (and for non-preemptible kernels, yielding is not needed either).

That leaves voluntary preemption, where only explicit yield calls may result
in a reschedule. To retain the existing behavior for such a configuration, we
should take the new situation into account, where the preempt count will be
zero rather than one, and yielding to pending softirqs is unnecessary.

Fixes: aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20240111112447.577640-2-ardb+git@google.com
Signed-off-by: Will Deacon <will@kernel.org>
commit 3931261ecf (parent 8c5a19cb17)
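For context on the check the patch below collapses into a single instruction: on arm64 the 64-bit thread_info::preempt_count word holds the preempt count in its low half and the negated TIF_NEED_RESCHED flag in its high half, so the whole word is zero exactly when preemption is allowed and a reschedule has been requested. The following is a minimal userspace sketch of that encoding, not kernel code: the union layout mirrors the little-endian arm64 case, SOFTIRQ_SHIFT is assumed to be 8 as in the generic <linux/preempt.h>, and all other names are invented for the demo.

/*
 * Illustrative userspace model, NOT kernel code: it mimics the little-endian
 * layout of the arm64 thread_info::preempt_count word to show why the new
 * cond_yield can use a single zero check under CONFIG_PREEMPT_VOLUNTARY.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

union preempt_word {
	uint64_t whole;                 /* what the macro loads with one ldr */
	struct {
		uint32_t count;         /* preempt/softirq/hardirq counts */
		uint32_t need_resched;  /* 0 when TIF_NEED_RESCHED is set */
	};
};

/* Equivalent of the new "cbz \tmp, \lbl": yield only if the word is zero. */
static bool should_yield(union preempt_word w)
{
	return w.whole == 0;
}

int main(void)
{
	union preempt_word w = { .whole = 0 };

	/* Task context, preemption enabled, reschedule requested: yield. */
	printf("resched pending, count 0:    %d\n", should_yield(w));

	/* No reschedule pending: the negated flag makes the word nonzero. */
	w.need_resched = 1;
	printf("no resched pending:          %d\n", should_yield(w));

	/* Serving a softirq: BIT(SOFTIRQ_SHIFT) keeps the word nonzero too. */
	w.need_resched = 0;
	w.count = 1u << 8;              /* assumed SOFTIRQ_SHIFT == 8 */
	printf("in softirq, resched pending: %d\n", should_yield(w));

	return 0;
}

As the commit message notes, the preempt count at the call site is now zero rather than one, which is what lets the old tbnz/sub/cbz sequence and the separate pending-softirq check shrink to the single cbz in the diff below.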
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
@@ -760,32 +760,25 @@ alternative_endif
 	.endm
 
 /*
- * Check whether preempt/bh-disabled asm code should yield as soon as
- * it is able. This is the case if we are currently running in task
- * context, and either a softirq is pending, or the TIF_NEED_RESCHED
- * flag is set and re-enabling preemption a single time would result in
- * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
- * stored negated in the top word of the thread_info::preempt_count
+ * Check whether asm code should yield as soon as it is able. This is
+ * the case if we are currently running in task context, and the
+ * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
+ * is stored negated in the top word of the thread_info::preempt_count
  * field)
  */
-	.macro	cond_yield, lbl:req, tmp:req, tmp2:req
+	.macro	cond_yield, lbl:req, tmp:req, tmp2
+#ifdef CONFIG_PREEMPT_VOLUNTARY
 	get_current_task \tmp
 	ldr	\tmp, [\tmp, #TSK_TI_PREEMPT]
 	/*
 	 * If we are serving a softirq, there is no point in yielding: the
 	 * softirq will not be preempted no matter what we do, so we should
-	 * run to completion as quickly as we can.
+	 * run to completion as quickly as we can. The preempt_count field will
+	 * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
+	 * catch this case too.
 	 */
-	tbnz	\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
-#ifdef CONFIG_PREEMPTION
-	sub	\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
 	cbz	\tmp, \lbl
 #endif
-	adr_l	\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-	get_this_cpu_offset	\tmp2
-	ldr	w\tmp, [\tmp, \tmp2]
-	cbnz	w\tmp, \lbl	// yield on pending softirq in task context
-.Lnoyield_\@:
 	.endm
 
 /*
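The macro above only decides when to yield; acting on the decision is left to the caller, which typically bails out of the asm routine, reports how much work is left, and gets re-invoked. The sketch below is a hedged userspace model of that convention, loosely patterned on the arm64 crypto glue loops: kernel_neon_begin()/kernel_neon_end() are real arm64 APIs but are stubbed out here, and simd_transform() and resched_pending are invented stand-ins for the asm core and the cond_yield condition.

/*
 * Userspace sketch of the caller-side convention, assuming the pattern used
 * by the arm64 crypto glue code: the "asm core" stops early when asked to
 * yield and reports how many blocks remain, and the caller loops until done.
 */
#include <stdio.h>

static int resched_pending;             /* stands in for the cond_yield check */

static void kernel_neon_begin(void) { } /* stub: really marks SIMD in use */
static void kernel_neon_end(void) { }   /* stub */

/* Stand-in for the asm core: process blocks until done or asked to yield. */
static int simd_transform(int blocks)
{
	while (blocks) {
		blocks--;               /* "process" one block */
		if (resched_pending)    /* what cond_yield would branch on */
			break;
	}
	return blocks;                  /* unprocessed blocks, 0 when done */
}

int main(void)
{
	int blocks = 8;

	resched_pending = 1;            /* pretend a reschedule is requested */
	do {
		kernel_neon_begin();
		blocks = simd_transform(blocks);
		kernel_neon_end();
		resched_pending = 0;    /* the scheduler ran between passes */
		printf("%d blocks remaining\n", blocks);
	} while (blocks);

	return 0;
}

Under voluntary preemption the reschedule can only happen at an explicit scheduling point in the caller, so returning early from the asm is what keeps latency bounded; the commit message makes the same point about explicit yield calls.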
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
@@ -117,8 +117,6 @@ int main(void)
   DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
   BLANK();
   DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
-  DEFINE(SOFTIRQ_SHIFT,	SOFTIRQ_SHIFT);
-  DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
   DEFINE(CPU_BOOT_TASK,	offsetof(struct secondary_data, task));
   BLANK();
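A note on this second hunk: entries in asm-offsets.c exist only to make C constants visible to assembly, so once the rewritten cond_yield stops referencing SOFTIRQ_SHIFT and IRQ_CPUSTAT_SOFTIRQ_PENDING, the two DEFINE() lines are dead and can go. The sketch below shows the mechanism; the DEFINE() macro is the one from include/linux/kbuild.h, while the file layout, the hard-coded 8, and the symbol choice are just for illustration. Like the kernel's asm-offsets.c, it is only ever compiled to assembly (gcc -S), never linked or executed.

/*
 * Each DEFINE() plants a "->NAME value" marker in the generated assembly;
 * kbuild's sed script turns those markers into "#define NAME value" lines in
 * the generated asm-offsets.h, which asm code can then use as immediates.
 * Build with "gcc -S" and look for the "->" markers in the .s output.
 */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

#define SOFTIRQ_SHIFT	8	/* stand-in for the value from <linux/preempt.h> */

int main(void)
{
	/* The kind of entry this patch deletes, now that nothing consumes it. */
	DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
	return 0;
}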