#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/io_uring_types.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,	/* work is marked for cancellation */
	IO_WQ_WORK_HASHED	= 2,	/* serialize with work sharing the same hash key */
	IO_WQ_WORK_UNBOUND	= 4,	/* queue to the unbounded worker pool */
	IO_WQ_WORK_CONCURRENT	= 16,	/* may run concurrently with other work */

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

/* Releases a completed work item; may return dependent work to run next */
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
/* Executes one queued work item */
typedef void (io_wq_work_fn)(struct io_wq_work *);

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

/* Drop a reference to the shared hash table; the final put frees it */
static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}
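
/*
 * Usage sketch (hypothetical caller code, not part of this API): the hash
 * table is refcounted so that several io_wq instances can share one; each
 * holder takes a reference up front and drops it when done.
 *
 *	refcount_inc(&hash->refs);
 *	data.hash = hash;
 *	...
 *	io_wq_put_hash(hash);	// frees the table on the final put
 */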
2019-11-25 08:49:20 -07:00
struct io_wq_data {
2021-02-19 12:33:30 -07:00
struct io_wq_hash * hash ;
2021-03-08 09:37:51 -07:00
struct task_struct * task ;
2020-06-08 21:08:20 +03:00
io_wq_work_fn * do_work ;
2020-03-04 16:14:12 +03:00
free_work_fn * free_work ;
2019-11-25 08:49:20 -07:00
} ;
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
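
/*
 * Setup sketch (my_do_work, my_free_work, req, and max_bounded_workers are
 * hypothetical, for illustration only): fill in struct io_wq_data, create a
 * workqueue accounted against the current task, queue work, then tear down.
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.task		= current,
 *		.do_work	= my_do_work,
 *		.free_work	= my_free_work,
 *	};
 *	struct io_wq *wq = io_wq_create(max_bounded_workers, &data);
 *
 *	if (!IS_ERR(wq)) {
 *		io_wq_enqueue(wq, &req->work);
 *		...
 *		io_wq_exit_start(wq);
 *		io_wq_put_and_exit(wq);
 *	}
 */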
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}
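
/*
 * Hashing sketch (req is hypothetical): work items tagged with the same key
 * are executed serially, e.g. to keep buffered writes to one inode ordered.
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *	// io_wq_is_hashed(&req->work) now returns true
 */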
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);
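
/*
 * Cancellation sketch (match_one() and target are hypothetical): the
 * callback is run against pending and running work, and returning true
 * marks a match; with cancel_all == false the walk stops at the first one.
 *
 *	static bool match_one(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	enum io_wq_cancel ret = io_wq_cancel_cb(wq, match_one, target, false);
 *	// IO_WQ_CANCEL_OK: cancelled before it started running
 *	// IO_WQ_CANCEL_RUNNING: found running, cancellation attempted
 *	// IO_WQ_CANCEL_NOTFOUND: no matching work
 */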
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

/* True when called from an io-wq worker thread rather than the issuing task */
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
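
/*
 * Sketch (do_blocking_work() and req are hypothetical): code shared between
 * task and worker context can branch on this, e.g. to avoid re-queueing work
 * from inside a worker:
 *
 *	if (io_wq_current_is_worker())
 *		do_blocking_work(req);	// already off the submitting task
 *	else
 *		io_wq_enqueue(wq, &req->work);
 */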
#endif