/* (blame residue, not code: 2019-10-22 19:25:58 +03:00) */
# ifndef INTERNAL_IO_WQ_H
# define INTERNAL_IO_WQ_H
struct io_wq ;
/*
 * Per-work-item flag bits stored in io_wq_work.flags.  The top
 * IO_WQ_HASH_SHIFT..31 bits of the flags word carry the hash key for
 * hashed (serialised) work.
 */
enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HAS_MM	= 2,
	IO_WQ_WORK_HASHED	= 4,
	IO_WQ_WORK_UNBOUND	= 32,
	IO_WQ_WORK_INTERNAL	= 64,
	IO_WQ_WORK_CB		= 128,
	IO_WQ_WORK_NO_CANCEL	= 256,
	IO_WQ_WORK_CONCURRENT	= 512,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
/* Outcome of a cancellation attempt. */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};
/*
 * Minimal singly-linked list: a node embedded in each work item, and a
 * head that tracks both ends so tail insertion is O(1).
 */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
static inline void wq_list_add_tail ( struct io_wq_work_node * node ,
struct io_wq_work_list * list )
{
if ( ! list - > first ) {
2019-12-08 07:06:46 +03:00
list - > last = node ;
WRITE_ONCE ( list - > first , node ) ;
2019-11-26 21:59:32 +03:00
} else {
list - > last - > next = node ;
list - > last = node ;
}
}
static inline void wq_node_del ( struct io_wq_work_list * list ,
struct io_wq_work_node * node ,
struct io_wq_work_node * prev )
{
if ( node = = list - > first )
2019-12-08 07:06:46 +03:00
WRITE_ONCE ( list - > first , node - > next ) ;
2019-11-26 21:59:32 +03:00
if ( node = = list - > last )
list - > last = prev ;
if ( prev )
prev - > next = node - > next ;
2019-12-05 03:19:44 +03:00
node - > next = NULL ;
2019-11-26 21:59:32 +03:00
}
/*
 * Walk the list; @prv trails one node behind @pos so the caller can hand
 * the pair straight to wq_node_del().
 */
#define wq_list_for_each(pos, prv, head)			\
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

/* Lockless emptiness check; pairs with WRITE_ONCE() in the list mutators. */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

#define INIT_WQ_LIST(list)	do {				\
	(list)->first = NULL;					\
	(list)->last = NULL;					\
} while (0)
2019-10-22 19:25:58 +03:00
struct io_wq_work {
2019-11-20 23:05:32 +03:00
union {
2019-11-26 21:59:32 +03:00
struct io_wq_work_node list ;
2019-11-20 23:05:32 +03:00
void * data ;
} ;
2019-10-22 19:25:58 +03:00
void ( * func ) ( struct io_wq_work * * ) ;
2019-10-24 21:39:47 +03:00
struct files_struct * files ;
2020-01-28 02:34:48 +03:00
struct mm_struct * mm ;
const struct cred * creds ;
2020-02-07 07:42:51 +03:00
struct fs_struct * fs ;
2019-11-26 21:59:32 +03:00
unsigned flags ;
2020-02-09 05:16:39 +03:00
pid_t task_pid ;
2019-10-22 19:25:58 +03:00
} ;
/*
 * Initialise a work item: full struct assignment zeroes every field
 * (files, mm, creds, fs, flags, task_pid) rather than leaving stale
 * values behind, then sets ->func.
 *
 * Fix: the original definition carried a stray line-continuation
 * backslash after "} while (0)", which spliced the following source line
 * into the macro definition.  Dropped here.
 */
#define INIT_IO_WORK(work, _func)				\
	do {							\
		*(work) = (struct io_wq_work){ .func = _func };	\
	} while (0)
/* Hooks invoked around work items; NOTE(review): presumably get/put a
 * reference on the work — confirm semantics against the io-wq.c callers. */
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);

/* Creation-time configuration handed to io_wq_create(). */
struct io_wq_data {
	struct user_struct *user;

	get_work_fn *get_work;
	put_work_fn *put_work;
};
/* Lifecycle */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

/* Queueing */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
void io_wq_flush(struct io_wq *wq);

/* Cancellation */
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
2019-10-22 19:25:58 +03:00
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
/* Without CONFIG_IO_WQ the scheduler hooks compile away to no-ops. */
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
2019-10-22 19:25:58 +03:00
2019-12-18 00:13:37 +03:00
static inline bool io_wq_current_is_worker ( void )
{
return in_task ( ) & & ( current - > flags & PF_IO_WORKER ) ;
}
# endif