/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_NEED_RESCHED	BIT(32)
#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)

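/*
 * The arch's thread_info is assumed to expose the 64-bit preempt_count as
 * a union: the low 32 bits (preempt.count) hold the preemption count
 * proper, while bit 32 (preempt.need_resched) caches the need_resched
 * state with inverted polarity. PREEMPT_ENABLED is therefore BIT(32):
 * count of zero with no reschedule pending. This lets a single 64-bit
 * load of the whole word answer "preemptible and needing a reschedule?"
 * in one test, as __preempt_count_dec_and_test() does below.
 */
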
static inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
	/*
	 * Preserve the existing value of PREEMPT_NEED_RESCHED: the store
	 * targets only the 32-bit count field, so bit 32 is left untouched
	 * even though pc is declared u64.
	 */
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

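/*
 * Idle tasks are initialized with preemption disabled and are never
 * supposed to see it re-enabled, so their count starts at
 * PREEMPT_DISABLED and stays there, with no reset needed across
 * hot-unplug and hotplug cycles.
 */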
#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

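/*
 * need_resched is stored with inverted polarity: 0 means a reschedule is
 * needed, 1 means none is pending. Combined with the layout above, the
 * full 64-bit word decodes as:
 *
 *	count	need_resched	preempt_count	meaning
 *	0	1 (none)	BIT(32)		preemptible, nothing to do
 *	0	0 (pending)	0		reschedule now
 *	n > 0	either		!= 0		preemption disabled
 */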
static inline void set_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
	return !current_thread_info()->preempt.need_resched;
}

static inline void __preempt_count_add(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc += val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc -= val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline bool __preempt_count_dec_and_test(void)
{
	struct thread_info *ti = current_thread_info();
	u64 pc = READ_ONCE(ti->preempt_count);

	/* Update only the count field, leaving need_resched unchanged */
	WRITE_ONCE(ti->preempt.count, --pc);

	/*
	 * If we wrote back all zeroes, then we're preemptible and in
	 * need of a reschedule. Otherwise, we need to reload the
	 * preempt_count in case the need_resched flag was cleared by an
	 * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
	 * pair.
	 */
	return !pc || !READ_ONCE(ti->preempt_count);
}

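/*
 * Note that pc below is the full 64-bit word: equality with
 * preempt_offset (a plain int, promoted for the comparison) can only
 * hold when bit 32 is clear, i.e. when a reschedule is pending *and*
 * the count matches the expected offset.
 */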
static inline bool should_resched(int preempt_offset)
{
	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
	return pc == preempt_offset;
}

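/*
 * For context, a rough sketch of how the generic layer in
 * <linux/preempt.h> consumes these primitives (simplified, not the
 * authoritative definitions):
 *
 *	#define preempt_disable()				\
 *	do {							\
 *		__preempt_count_add(1);				\
 *		barrier();					\
 *	} while (0)
 *
 *	#define preempt_enable()				\
 *	do {							\
 *		barrier();					\
 *		if (unlikely(__preempt_count_dec_and_test()))	\
 *			__preempt_schedule();			\
 *	} while (0)
 */
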
#ifdef CONFIG_PREEMPTION
void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */