#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
        IOU_OK                  = 0,
        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

        /*
         * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
         * the poll runner that multishot should be removed and the result
         * is set on req->cqe.res.
         */
        IOU_STOP_MULTISHOT      = -ECANCELED,
};
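
/*
 * Illustrative sketch (not part of this header): an opcode issue handler
 * conventionally stores its result with io_req_set_res() and returns IOU_OK
 * so the core posts the CQE, or returns IOU_ISSUE_SKIP_COMPLETE when the
 * completion happens elsewhere (poll retry, async punt, etc.). io_foo() and
 * do_foo() below are hypothetical names used only for the example.
 *
 *      int io_foo(struct io_kiocb *req, unsigned int issue_flags)
 *      {
 *              int ret = do_foo(req);          // hypothetical helper
 *
 *              io_req_set_res(req, ret, 0);
 *              return IOU_OK;
 *      }
 */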

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
                bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                               unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
        return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
                                struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
                        bool cancel_all);

#define io_lockdep_assert_cq_locked(ctx)                                \
        do {                                                            \
                if (ctx->flags & IORING_SETUP_IOPOLL) {                 \
                        lockdep_assert_held(&ctx->uring_lock);          \
                } else if (!ctx->task_complete) {                       \
                        lockdep_assert_held(&ctx->completion_lock);     \
                } else if (ctx->submitter_task->flags & PF_EXITING) {   \
                        lockdep_assert(current_work());                 \
                } else {                                                \
                        lockdep_assert(current == ctx->submitter_task); \
                }                                                       \
        } while (0)

static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, true);
}

#define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)
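
/*
 * Example usage (illustrative only): walk a submission link chain from its
 * head and inspect every request on it.
 *
 *      struct io_kiocb *cur;
 *
 *      io_for_each_link(cur, head)
 *              if (cur->flags & REQ_F_INFLIGHT)
 *                      ...;
 */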

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                                       bool overflow)
{
        io_lockdep_assert_cq_locked(ctx);

        if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
                struct io_uring_cqe *cqe = ctx->cqe_cached;

                ctx->cached_cq_tail++;
                ctx->cqe_cached++;
                if (ctx->flags & IORING_SETUP_CQE32)
                        ctx->cqe_cached++;
                return cqe;
        }

        return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
        return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
                                     struct io_kiocb *req)
{
        struct io_uring_cqe *cqe;

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        cqe = io_get_cqe(ctx);
        if (unlikely(!cqe))
                return false;

        trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
                                req->cqe.res, req->cqe.flags,
                                (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
                                (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

        memcpy(cqe, &req->cqe, sizeof(*cqe));

        if (ctx->flags & IORING_SETUP_CQE32) {
                u64 extra1 = 0, extra2 = 0;

                if (req->flags & REQ_F_CQE32_INIT) {
                        extra1 = req->extra1;
                        extra2 = req->extra2;
                }

                WRITE_ONCE(cqe->big_cqe[0], extra1);
                WRITE_ONCE(cqe->big_cqe[1], extra2);
        }
        return true;
}

static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
                                   struct io_kiocb *req)
{
        if (likely(__io_fill_cqe_req(ctx, req)))
                return true;
        return io_req_cqe_overflow(req);
}
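
/*
 * A minimal posting sketch, assuming the caller takes the CQ lock the same
 * way io_uring.c does (io_cq_lock()/io_cq_unlock_post() live there, not in
 * this header): fill the CQE under the lock, then commit the ring and wake
 * waiters on the unlock-post side.
 *
 *      io_cq_lock(ctx);
 *      io_fill_cqe_req(ctx, req);
 *      io_cq_unlock_post(ctx);
 */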

static inline void req_set_fail(struct io_kiocb *req)
{
        req->flags |= REQ_F_FAIL;
        if (req->flags & REQ_F_CQE_SKIP) {
                req->flags &= ~REQ_F_CQE_SKIP;
                req->flags |= REQ_F_SKIP_LINK_CQES;
        }
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
        req->cqe.res = res;
        req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
        return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
        if (file)
                fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                                         unsigned issue_flags)
{
        lockdep_assert_held(&ctx->uring_lock);
        if (issue_flags & IO_URING_F_UNLOCKED)
                mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
                                       unsigned issue_flags)
{
        /*
         * "Normal" inline submissions always hold the uring_lock, since we
         * grab it from the system call. Same is true for the SQPOLL offload.
         * The only exception is when we've detached the request and issue it
         * from an async worker thread; grab the lock in that case.
         */
        if (issue_flags & IO_URING_F_UNLOCKED)
                mutex_lock(&ctx->uring_lock);
        lockdep_assert_held(&ctx->uring_lock);
}
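
/*
 * Typical pattern (sketch only) for an opcode handler that touches ctx
 * state guarded by ->uring_lock, such as the fixed file or buffer tables:
 * the pair only takes the mutex for IO_URING_F_UNLOCKED (io-wq) issue and
 * merely asserts the lock otherwise.
 *
 *      io_ring_submit_lock(ctx, issue_flags);
 *      ... update ctx state ...
 *      io_ring_submit_unlock(ctx, issue_flags);
 */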

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
        /* order cqe stores with ring update */
        smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
        /*
         * Trigger waitqueue handler on all waiters on our waitqueue. This
         * won't necessarily wake up all the tasks, io_should_wake() will make
         * that decision.
         *
         * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
         * is set in the mask so that if we recurse back into our own poll
         * waitqueue handlers, we know we have a dependency between eventfd or
         * epoll and should terminate multishot poll at that point.
         */
        if (waitqueue_active(&ctx->cq_wait))
                __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
                          poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
        smp_mb();
        __io_cqring_wake(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
        struct io_rings *r = ctx->rings;

        return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;

        /* make sure SQ entry isn't read before tail */
        return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline int io_run_task_work(void)
{
        /*
         * Always check-and-clear the task_work notification signal. With how
         * signaling works for task_work, we can find it set with nothing to
         * run. We need to clear it for that case, like get_signal() does.
         */
        if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                clear_notify_signal();
        if (task_work_pending(current)) {
                __set_current_state(TASK_RUNNING);
                task_work_run();
                return 1;
        }

        return 0;
}
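
/*
 * Illustrative only: wait-style loops generally flush pending task_work
 * before deciding whether to sleep, re-checking their condition whenever
 * work was actually run.
 *
 *      do {
 *              if (io_run_task_work())
 *                      continue;       // work ran, re-check the condition
 *              ... sleep or poll ...
 *      } while (!done);
 */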

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
        return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
        int ret = 0;
        int ret2;

        if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
                ret = io_run_local_work(ctx);

        /* want to run this after in case more is added */
        ret2 = io_run_task_work();

        /*
         * Prefer propagating an error over the count of work items run,
         * but the task_work above has still been run if it was requested.
         */
        if (ret >= 0)
                ret += ret2;

        return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
        bool locked;
        int ret;

        if (llist_empty(&ctx->work_llist))
                return 0;

        locked = true;
        ret = __io_run_local_work(ctx, &locked);
        /* shouldn't happen! */
        if (WARN_ON_ONCE(!locked))
                mutex_lock(&ctx->uring_lock);
        return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
        if (!*locked) {
                mutex_lock(&ctx->uring_lock);
                *locked = true;
        }
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
        __must_hold(&req->ctx->uring_lock)
{
        struct io_submit_state *state = &req->ctx->submit_state;

        lockdep_assert_held(&req->ctx->uring_lock);

        wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
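
/*
 * Illustrative use, assuming a context that satisfies the rule above
 * (IO_URING_F_COMPLETE_DEFER issue or a tw handler holding ->uring_lock):
 * store the result, then queue the request for the deferred flush instead
 * of posting its CQE immediately.
 *
 *      io_req_set_res(req, res, cflags);
 *      io_req_complete_defer(req);
 */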

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
        if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
                __io_commit_cqring_flush(ctx);
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
        if (likely(task == current))
                task->io_uring->cached_refs += nr;
        else
                __io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
        struct io_uring_task *tctx = current->io_uring;

        tctx->cached_refs -= nr;
        if (unlikely(tctx->cached_refs < 0))
                io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
        return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
        if (unlikely(io_req_cache_empty(ctx)))
                return __io_alloc_req_refill(ctx);
        return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
        struct io_wq_work_node *node;

        node = wq_stack_extract(&ctx->submit_state.free_list);
        return container_of(node, struct io_kiocb, comp_list);
}
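
/*
 * Allocation sketch (the error handling is illustrative): refill the
 * request cache first, since io_alloc_req() assumes the free list is
 * non-empty.
 *
 *      if (unlikely(!io_alloc_req_refill(ctx)))
 *              return -EAGAIN;
 *      req = io_alloc_req(ctx);
 */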

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
        return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
                      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
        io_req_set_res(req, res, 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(req);
}
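
/*
 * Sketch: cancelation-style paths that cannot complete a request inline
 * can set its result and punt the completion to task_work in one call:
 *
 *      io_req_queue_tw_complete(req, -ECANCELED);
 */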

#endif