/* task_work: run callbacks queued on a task before it returns to user mode */
# include <linux/spinlock.h>
# include <linux/task_work.h>
# include <linux/tracehook.h>
/*
 * Sentinel installed in ->task_works by task_work_run() once the task is
 * exiting and the list has drained; task_work_add() refuses to queue past
 * it. All we need is ->next == NULL, so zero-initialization suffices.
 */
static struct callback_head work_exited;
/**
 * task_work_add - queue @work to run in the context of @task
 * @task: the task which should run the callback
 * @work: the callback to queue; the caller must keep it alive until it runs
 * @notify: if true, kick @task via set_notify_resume() so the work runs
 *	on its next return to user mode
 *
 * Lockless LIFO push onto @task->task_works: ->next is linked to the
 * current head before the cmpxchg() publishes @work, and the loop retries
 * if another CPU changed the head in between.
 *
 * Returns 0 on success, or -ESRCH if task_work_run() has already sealed
 * the list with &work_exited (the task is past the point of running works).
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = ACCESS_ONCE(task->task_works);
		/* List sealed by the exiting task: refuse new work. */
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}
2012-06-27 11:07:19 +04:00
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task the work was queued on
 * @func: the callback identifying which work to remove
 *
 * Walks @task->task_works under pi_lock and unlinks the first entry whose
 * ->func matches @func. The unlink still uses cmpxchg() because the list
 * head can be modified concurrently by the lockless task_work_add() and by
 * task_work_run() taking the whole list.
 *
 * Returns the unlinked callback_head, or NULL if no matching work was
 * found (it may already be running or have run).
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work = NULL;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		/* Pair with the publishing cmpxchg before reading *work. */
		read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
/**
 * task_work_run - execute every pending work queued on current
 *
 * Atomically detaches the whole ->task_works list and runs the callbacks
 * in the order they were added (the list is stored LIFO, so it is reversed
 * first). If current is exiting (PF_EXITING) and the list is empty, the
 * head is replaced with &work_exited so any later task_work_add() fails
 * with -ESRCH. Loops in case a callback queues more work.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		/* Nothing (more) to run. */
		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in fifo order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		/* Run each callback; reschedule between them if needed. */
		work = head;
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}