// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#include <net/af_unix.h>

#include "alloc_cache.h"
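
/* Max number of recycled rsrc nodes kept in the ctx alloc cache */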
#define IO_NODE_ALLOC_CACHE_MAX	32
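
/*
 * Resource tags live in a two-level table: each second-level table is
 * one page of u64 tags, so it holds PAGE_SIZE / sizeof(u64), i.e.
 * 1 << (PAGE_SHIFT - 3), entries.
 */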
#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};
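
/* One deferred resource drop: the user-supplied tag plus the resource */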
struct io_rsrc_put {
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
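
/*
 * Per-table state for a set of registered resources (files or buffers):
 * the two-level tag table, entry count and type, and a quiesce flag set
 * while unregister/update waits for outstanding node refs to drain.
 */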
struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	u16				rsrc_type;
	bool				quiesce;
};
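
/*
 * Refcounted node tracking one generation of resource users: requests
 * referencing the tables take a ref, and a queued removal (item) is
 * only processed once all refs are gone. Freed nodes are recycled
 * through the ctx alloc cache via the 'cache' union member.
 */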
struct io_rsrc_node {
	union {
		struct io_cache_entry		cache;
		struct io_ring_ctx		*ctx;
	};
	int				refs;
	bool				empty;
	u16				type;
	struct list_head		node;
	struct io_rsrc_put		item;
};
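
/* A registered ("fixed") user buffer, pinned and described by bio_vecs */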
struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc);

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);
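
/*
 * Drop one reference on @node; the final put hands it to
 * io_rsrc_node_ref_zero(). ->uring_lock protects the plain refcount.
 */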
static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);

	if (node && !--node->refs)
		io_rsrc_node_ref_zero(node);
}

static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
{
	io_put_rsrc_node(ctx, req->rsrc_node);
}
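
/* Take a reference; the bare increment relies on ->uring_lock */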
static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
				       struct io_rsrc_node *node)
{
	node->refs++;
}
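
/* Attach the current rsrc node to @req and take a ref on it */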
static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
{
	lockdep_assert_held(&ctx->uring_lock);
	req->rsrc_node = ctx->rsrc_node;
	io_charge_rsrc_node(ctx, ctx->rsrc_node);
}
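
/*
 * Lazily attach the current rsrc node the first time @req touches a
 * fixed resource, taking ->uring_lock only if the issue path doesn't
 * already hold it (per @issue_flags).
 */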
static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx,
					unsigned int issue_flags)
{
	if (!req->rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		__io_req_set_rsrc_node(req, ctx);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
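
/*
 * Map a flat resource index to its tag slot. Worked example, assuming
 * 4K pages (PAGE_SHIFT == 12): the shift is 9, each second-level table
 * holds 512 tags, and idx 1000 resolves to tags[1][488].
 */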
static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
{
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;

	return &data->tags[table_idx][off];
}
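
/* Allocate the initial rsrc node when setting up a ring */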
static inline int io_rsrc_init(struct io_ring_ctx *ctx)
{
	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	return ctx->rsrc_node ? 0 : -ENOMEM;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
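
/* Uncharge locked memory accounting; pairs with __io_account_mem() */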
static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

#endif