// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"
struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY)
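
/*
 * Match callback for io-wq cancelation. A work item matches if it belongs
 * to the same ring and, depending on the flags, targets the given file
 * (IORING_ASYNC_CANCEL_FD), any request (IORING_ASYNC_CANCEL_ANY), or the
 * given user_data. For "match many" cancelations, cancel_seq ensures a
 * request is only matched once per cancelation attempt.
 */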
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		;
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}
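
/*
 * Try to cancel a matching request on the given task's io-wq, translating
 * the io-wq result into 0 (canceled), -EALREADY (already running) or
 * -ENOENT (no match found).
 */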
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}
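
/*
 * Attempt cancelation everywhere a matching request may be parked: first
 * the io-wq of the task owning @req, then poll, and finally timeouts
 * (unless the match is keyed on a file descriptor).
 */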
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);

	ret = io_async_cancel_one(req->task->io_uring, cd);
	/*
	 * Fall-through even for -EALREADY, as we may have poll armed
	 * that need unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
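
/*
 * Parse and validate the cancel SQE: no buffer select, no unexpected
 * fields, only flags in CANCEL_FLAGS, and CANCEL_FD is mutually exclusive
 * with CANCEL_ANY.
 */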
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}
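
/*
 * Core of async cancel: keep canceling via io_try_cancel() until nothing
 * more matches, then fall back to scanning the io-wq of every task attached
 * to the ring. Returns the number of canceled requests for "match many"
 * cancelations, otherwise the result of the single attempt.
 */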
static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(req, cd);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}
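
/*
 * Handler for IORING_OP_ASYNC_CANCEL: resolve the target file when
 * canceling by fd (fixed or normal), then run the cancelation and post
 * the result.
 */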
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE)
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		else
			req->file = io_file_get_normal(req, cancel->fd);
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, req, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
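
/* Initialize the per-bucket locks and lists of an io_hash_table. */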
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}