// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/closure.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
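/*
 * Called with the value of ->remaining after a put/sub: if no refs remain,
 * either requeue the closure to run cl->fn again, or run the destructor and
 * drop the ref held on the parent.
 */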
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}
/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);
/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
/*
 * closure_wake_up - wake up all closures on a wait list, without memory barrier
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
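	/*
	 * Use the _safe iterator: closure_sub() may wake a task that
	 * immediately calls closure_wait() again, putting cl->list on a
	 * different wait list, so the next node must be fetched before
	 * the wakeup.
	 */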
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);
/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
struct closure_syncer {
	struct task_struct	*task;
	int			done;
};
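/*
 * Continuation used by __closure_sync(): wake the task sleeping on the
 * on-stack closure_syncer. The task pointer is read before setting ->done
 * (after which the waiter may return and its stack go away), and the RCU
 * read lock keeps the task_struct valid for wake_up_process().
 */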
static void closure_sync_fn(struct closure *cl)
{
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}
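/*
 * Block until all outstanding refs on @cl are dropped: install
 * closure_sync_fn() as the continuation, then sleep until it sets s.done.
 */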
void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);
#ifdef CONFIG_DEBUG_CLOSURES

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);
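/* debugfs dump of every live closure: pointer, fn, parent, refcount, state. */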
static int debug_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING ? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_puts(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debug);
static int __init closure_debug_init(void)
{
	debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
	return 0;
}
late_initcall(closure_debug_init)

#endif