rcu: Provide a get_completed_synchronize_rcu() function
It is currently up to the caller to handle stale return values from
get_state_synchronize_rcu(). If poll_state_synchronize_rcu() returned
true once, a grace period has elapsed, regardless of the fact that
counter wrap might cause some future poll_state_synchronize_rcu()
invocation to return false. For example, the caller might store a
separate flag that indicates whether some previous call to
poll_state_synchronize_rcu() determined that the relevant grace period
had already ended.

This approach works, but it requires extra storage and is easy to get
wrong. This commit therefore introduces a get_completed_synchronize_rcu()
function that returns a cookie that causes poll_state_synchronize_rcu()
to always return true. This already-completed cookie can be stored in
place of the cookie that previously caused poll_state_synchronize_rcu()
to return true. It can also be used to flag a given structure as not
having been exposed to readers, and thus not requiring a grace period
to elapse.

This commit is in preparation for polled expedited grace periods.

Link: https://lore.kernel.org/all/20220121142454.1994916-1-bfoster@redhat.com/
Link: https://docs.google.com/document/d/1RNKWW9jQyfjxw2E8dsXVTdvZYh0HnYeSHDKog9jhdN8/edit?usp=sharing
Cc: Brian Foster <bfoster@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Ian Kent <raven@themaw.net>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit 414c12385d
parent 2403e8044f
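For context, a minimal usage sketch of the pattern the commit message describes (not part of this commit): struct foo, its rcu_cookie field, and the foo_* helpers are hypothetical, while the polled grace-period APIs are the real ones being extended here.

#include <linux/rcupdate.h>

/* Hypothetical structure tracking its own grace-period cookie. */
struct foo {
        unsigned long rcu_cookie;
        /* ... payload ... */
};

/* Record the grace-period state in effect when the object is exposed. */
static void foo_expose(struct foo *fp)
{
        fp->rcu_cookie = get_state_synchronize_rcu();
}

/*
 * Once a grace period is known to have elapsed, overwrite the stored
 * cookie with the pre-completed one so that counter wrap cannot make a
 * later poll_state_synchronize_rcu() call report "not yet".
 */
static bool foo_gp_elapsed(struct foo *fp)
{
        if (poll_state_synchronize_rcu(fp->rcu_cookie)) {
                fp->rcu_cookie = get_completed_synchronize_rcu();
                return true;
        }
        return false;
}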
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,6 +41,7 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
 void rcu_barrier_tasks(void);
 void rcu_barrier_tasks_rude(void);
 void synchronize_rcu(void);
+unsigned long get_completed_synchronize_rcu(void);

 #ifdef CONFIG_PREEMPT_RCU

--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -23,6 +23,9 @@
 #define RCU_SEQ_CTR_SHIFT 2
 #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)

+/* Low-order bit definition for polled grace-period APIs. */
+#define RCU_GET_STATE_COMPLETED 0x1
+
 extern int sysctl_sched_rt_runtime;

 /*
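Note (my reading of the bit layout, not stated in the commit itself): the reserved value sits in the low-order state bits covered by RCU_SEQ_STATE_MASK, which cookies handed out by Tree RCU's polled grace-period APIs appear to keep clear, so a genuine cookie should never equal RCU_GET_STATE_COMPLETED.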
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -58,7 +58,7 @@ void rcu_qs(void)
                 rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
                 raise_softirq_irqoff(RCU_SOFTIRQ);
         }
-        WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 1);
+        WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
         local_irq_restore(flags);
 }

@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
  */
 bool poll_state_synchronize_rcu(unsigned long oldstate)
 {
-        return READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
+        return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
 }
 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);

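The apparent reason for the step change in rcu_qs() is the same reservation: with Tiny RCU's gp_seq now advancing by two, an ordinary cookie can never collide with RCU_GET_STATE_COMPLETED (0x1), and poll_state_synchronize_rcu() short-circuits on the reserved value before comparing against gp_seq.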
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3924,7 +3924,8 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
  */
 bool poll_state_synchronize_rcu(unsigned long oldstate)
 {
-        if (rcu_seq_done_exact(&rcu_state.gp_seq, oldstate)) {
+        if (oldstate == RCU_GET_STATE_COMPLETED ||
+            rcu_seq_done_exact(&rcu_state.gp_seq, oldstate)) {
                 smp_mb(); /* Ensure GP ends before subsequent accesses. */
                 return true;
         }
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -516,6 +516,19 @@ int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
 module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

+/**
+ * get_completed_synchronize_rcu - Return a pre-completed polled state cookie
+ *
+ * Returns a value that will always be treated by functions like
+ * poll_state_synchronize_rcu() as a cookie whose grace period has already
+ * completed.
+ */
+unsigned long get_completed_synchronize_rcu(void)
+{
+        return RCU_GET_STATE_COMPLETED;
+}
+EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu);
+
 #ifdef CONFIG_PROVE_RCU

 /*
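The commit message also mentions flagging a structure as never having been exposed to readers. Continuing the hypothetical struct foo sketch from above, one way that could look (foo_alloc() and foo_release() are made up for illustration):

#include <linux/slab.h>

/* Initialize the cookie so an unpublished object needs no grace period. */
static struct foo *foo_alloc(void)
{
        struct foo *fp = kzalloc(sizeof(*fp), GFP_KERNEL);

        if (fp)
                fp->rcu_cookie = get_completed_synchronize_rcu();
        return fp;
}

static void foo_release(struct foo *fp)
{
        /*
         * If the object was published, foo_expose() stored a real cookie,
         * so wait only when that grace period has not yet completed.  An
         * object that was never exposed still carries the pre-completed
         * cookie and is freed immediately.
         */
        if (!poll_state_synchronize_rcu(fp->rcu_cookie))
                synchronize_rcu();
        kfree(fp);
}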