/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */
# include <linux/rcu_segcblist.h>
2019-07-02 03:36:53 +03:00
/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs ( struct rcu_cblist * rclp )
{
return READ_ONCE ( rclp - > len ) ;
}
2017-05-02 11:31:18 +03:00
/*
* Account for the fact that a previously dequeued callback turned out
* to be marked as lazy .
*/
static inline void rcu_cblist_dequeued_lazy ( struct rcu_cblist * rclp )
{
rclp - > len_lazy - - ;
}
void rcu_cblist_init(struct rcu_cblist *rclp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
2017-05-02 11:31:18 +03:00
/*
* Is the specified rcu_segcblist structure empty ?
*
* But careful ! The fact that the - > head field is NULL does not
* necessarily imply that there are no callbacks associated with
* this structure . When callbacks are being invoked , they are
* removed as a group . If callback invocation must be preempted ,
* the remaining callbacks will be added back to the list . Either
* way , the counts are updated later .
*
* So it is often the case that rcu_segcblist_n_cbs ( ) should be used
* instead .
*/
static inline bool rcu_segcblist_empty ( struct rcu_segcblist * rsclp )
{
2019-05-14 01:57:50 +03:00
return ! READ_ONCE ( rsclp - > head ) ;
2017-05-02 11:31:18 +03:00
}
/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs ( struct rcu_segcblist * rsclp )
{
2019-07-02 03:36:53 +03:00
# ifdef CONFIG_RCU_NOCB_CPU
return atomic_long_read ( & rsclp - > len ) ;
# else
2017-05-02 11:31:18 +03:00
return READ_ONCE ( rsclp - > len ) ;
2019-07-02 03:36:53 +03:00
# endif
2017-05-02 11:31:18 +03:00
}
/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs ( struct rcu_segcblist * rsclp )
{
return rsclp - > len_lazy ;
}
/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs ( struct rcu_segcblist * rsclp )
{
2019-07-02 03:36:53 +03:00
return rcu_segcblist_n_cbs ( rsclp ) - rsclp - > len_lazy ;
2017-05-02 11:31:18 +03:00
}
/*
* Is the specified rcu_segcblist enabled , for example , not corresponding
2019-05-14 19:50:49 +03:00
* to an offline CPU ?
2017-05-02 11:31:18 +03:00
*/
static inline bool rcu_segcblist_is_enabled ( struct rcu_segcblist * rsclp )
{
2019-04-12 22:34:41 +03:00
return rsclp - > enabled ;
2017-05-02 11:31:18 +03:00
}
2019-04-13 01:58:34 +03:00
/* Is the specified rcu_segcblist offloaded? */
static inline bool rcu_segcblist_is_offloaded ( struct rcu_segcblist * rsclp )
{
return rsclp - > offloaded ;
}
2017-05-02 11:31:18 +03:00
/*
* Are all segments following the specified segment of the specified
* rcu_segcblist structure empty of callbacks ? ( The specified
* segment might well contain callbacks . )
*/
static inline bool rcu_segcblist_restempty ( struct rcu_segcblist * rsclp , int seg )
{
2019-05-14 00:36:11 +03:00
return ! READ_ONCE ( * READ_ONCE ( rsclp - > tails [ seg ] ) ) ;
2017-05-02 11:31:18 +03:00
}
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);