#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like the signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * Note: there is no ordering guarantee on works queued here.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	/* lock-free push; retry if we race with another add, cancel or run */
	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}
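
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that asks @task to run a callback on its next return to user mode.
 * my_twork, my_callback and queue_my_callback are hypothetical names;
 * init_task_work() is the initializer from <linux/task_work.h>.
 */
static struct callback_head my_twork;

static void my_callback(struct callback_head *head)
{
	/* runs in the target task's context, in kernel mode */
}

static int queue_my_callback(struct task_struct *task)
{
	init_task_work(&my_twork, my_callback);
	return task_work_add(task, &my_twork, true); /* -ESRCH if @task exits */
}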

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the new entry
	 * before this work and we will find it again, or we raced with
	 * task_work_run() and *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
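
/*
 * Cancellation sketch (illustrative): undo a pending queue_my_callback()
 * from above. A non-NULL return means the work was dequeued before it
 * could run, so the caller owns the callback_head again; NULL means it
 * was never queued or has already been (or is being) run.
 */
static bool cancel_my_callback(struct task_struct *task)
{
	return task_work_cancel(task, my_callback) != NULL;
}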

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
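
/*
 * For reference, a minimal sketch of the return-to-user call site,
 * modeled on tracehook_notify_resume() in <linux/tracehook.h>
 * (exact details vary by kernel version): the arch code clears
 * TIF_NOTIFY_RESUME, which set_notify_resume() above had set, and
 * then flushes the queued works. notify_resume_sketch is a
 * hypothetical name.
 */
static inline void notify_resume_sketch(void)
{
	if (unlikely(current->task_works)) /* cheap check before flushing */
		task_work_run();
}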