/*
* kernel / workqueue_internal . h
*
* Workqueue internal header file . Only to be included by workqueue and
* core kernel subsystems .
*/
# ifndef _KERNEL_WORKQUEUE_INTERNAL_H
# define _KERNEL_WORKQUEUE_INTERNAL_H
# include <linux/workqueue.h>
# include <linux/kthread.h>
struct worker_pool ;
/*
* The poor guys doing the actual heavy lifting . All on - duty workers are
* either serving the manager role , on idle list or on busy hash . For
* details on the locking annotation ( L , I , X . . . ) , refer to workqueue . c .
*
* Only to be used in workqueue and async .
*/
struct worker {
/* on idle list while idle, on busy hash table while busy */
union {
struct list_head entry ; /* L: while idle */
struct hlist_node hentry ; /* L: while busy */
} ;
struct work_struct * current_work ; /* L: work being processed */
work_func_t current_func ; /* L: current_work's fn */
2013-02-14 07:29:12 +04:00
struct pool_workqueue * current_pwq ; /* L: current_work's pwq */
2013-05-01 02:27:22 +04:00
bool desc_valid ; /* ->desc is valid */
2013-01-19 02:05:55 +04:00
struct list_head scheduled ; /* L: scheduled works */
2013-05-01 02:27:22 +04:00
/* 64 bytes boundary on 64bit, 32 on 32bit */
2013-01-19 02:05:55 +04:00
struct task_struct * task ; /* I: worker task */
struct worker_pool * pool ; /* I: the associated pool */
2013-02-20 00:17:02 +04:00
/* L: for rescuers */
2014-05-20 13:46:34 +04:00
struct list_head node ; /* A: anchored at pool->workers */
/* A: runs through worker->node */
2013-05-01 02:27:22 +04:00
2013-01-19 02:05:55 +04:00
unsigned long last_active ; /* L: last active timestamp */
unsigned int flags ; /* X: flags */
int id ; /* I: worker id */
2013-05-01 02:27:22 +04:00
/*
* Opaque string set with work_set_desc ( ) . Printed out with task
* dump for debugging - WARN , BUG , panic or sysrq .
*/
char desc [ WORKER_DESC_LEN ] ;
2013-01-19 02:05:55 +04:00
/* used only by rescuers to point to the target workqueue */
struct workqueue_struct * rescue_wq ; /* I: the workqueue to rescue */
} ;
/**
* current_wq_worker - return struct worker if % current is a workqueue worker
*/
static inline struct worker * current_wq_worker ( void )
{
if ( current - > flags & PF_WQ_WORKER )
return kthread_data ( current ) ;
return NULL ;
}
/*
 * Scheduler hooks for concurrency managed workqueue.  Only to be used from
 * sched/core.c and workqueue.c.
 */
void wq_worker_waking_up(struct task_struct *task, int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
# endif /* _KERNEL_WORKQUEUE_INTERNAL_H */