// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
*/
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}
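
/*
 * Illustrative sketch (not part of this file; "struct foo_head" and its
 * helpers are hypothetical): a user typically embeds an rcu_sync in its
 * own structure and initializes it before readers or updaters can see it:
 *
 *	struct foo_head {
 *		struct rcu_sync rss;
 *		int value;
 *	};
 *
 *	static void foo_head_init(struct foo_head *fh)
 *	{
 *		rcu_sync_init(&fh->rss);
 *		fh->value = 0;
 *	}
 */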

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
*/
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
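
/*
 * Illustrative sketch (hypothetical foo_head as above): initialization that
 * forces readers onto the slow path right away, so that the following
 * rcu_sync_enter()/rcu_sync_exit() pairs turn into NO-OPs:
 *
 *	static void foo_head_init_held(struct foo_head *fh)
 *	{
 *		rcu_sync_init(&fh->rss);
 *		rcu_sync_enter_start(&fh->rss);	// readers start on slow path
 *	}
 */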

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu_hurry(&rsp->cb_head, rcu_sync_func);
}
/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto the slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
*/
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
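
/*
 * Summary of the main gp_state transitions driven by the code above
 * (informational comment only, derived from this file):
 *
 *	GP_IDLE   --rcu_sync_enter()---------------------> GP_ENTER
 *	GP_ENTER  --grace period, gp_count != 0----------> GP_PASSED
 *	GP_PASSED --last rcu_sync_exit()-----------------> GP_EXIT
 *	GP_EXIT   --grace period, gp_count == 0----------> GP_IDLE
 *	GP_EXIT   --rcu_sync_enter(), then grace period--> GP_PASSED
 *	GP_EXIT   --enter+exit pair before that GP ends--> GP_REPLAY
 *	GP_REPLAY --grace period, gp_count == 0----------> GP_EXIT (callback requeued)
 */
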
/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period.  However, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
*/
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback if
		 * we are called at early boot time, but this shouldn't happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above, this simply does the "synchronous"
		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}
/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
*/
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
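
/*
 * Illustrative sketch (hypothetical foo_head as above, not taken from any
 * in-tree user): an updater brackets its update with rcu_sync_enter() and
 * rcu_sync_exit(), while readers check rcu_sync_is_idle() under
 * rcu_read_lock() to choose between fastpath and slowpath:
 *
 *	static void foo_update(struct foo_head *fh, int new_value)
 *	{
 *		rcu_sync_enter(&fh->rss);	// readers now on slow path
 *		fh->value = new_value;		// the write side critical section
 *		rcu_sync_exit(&fh->rss);	// fastpath restored after a GP
 *	}
 *
 *	static bool foo_reader_fastpath(struct foo_head *fh)
 *	{
 *		bool idle;
 *
 *		rcu_read_lock();
 *		idle = rcu_sync_is_idle(&fh->rss);	// true: no updater active
 *		rcu_read_unlock();
 *		return idle;
 *	}
 */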

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
*/
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
}
}
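
/*
 * Illustrative sketch (hypothetical foo_head as above): teardown must let
 * rcu_sync_dtor() flush any pending callback before the memory is freed:
 *
 *	static void foo_head_free(struct foo_head *fh)
 *	{
 *		rcu_sync_dtor(&fh->rss);	// waits for a pending rcu_sync_func()
 *		kfree(fh);
 *	}
 */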