#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include <linux/swab.h>
#include "barrier.h"
/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};
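
/*
 * Note (non-normative): the pointers in io_uring_sq/io_uring_cq refer into
 * the kernel-mapped SQ and CQ rings and are filled in by io_uring_queue_mmap()
 * or io_uring_queue_init(); applications normally just embed a struct io_uring
 * and let the library manage these fields, e.g.:
 *
 *	struct io_uring ring;
 *
 *	if (io_uring_queue_init(32, &ring, 0) < 0)
 *		... handle error ...
 */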
/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
			  unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
			     unsigned int nr_args);
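
/*
 * Raw syscall usage sketch (assumes the ring memory has already been mapped,
 * which io_uring_queue_init()/io_uring_queue_mmap() below normally handle):
 *
 *	struct io_uring_params p;
 *	int fd;
 *
 *	memset(&p, 0, sizeof(p));
 *	fd = io_uring_setup(8, &p);
 *	if (fd < 0)
 *		... handle error ...
 *	// submit pending sqes and wait for one completion
 *	io_uring_enter(fd, 1, 1, IORING_ENTER_GETEVENTS, NULL);
 */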
/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
			       unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
			       struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
			     struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
			     struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
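
/*
 * Typical request lifecycle sketch (error handling abbreviated; buf and fd
 * are assumed to be set up by the caller):
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->res < 0)
 *		... handle I/O error ...
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */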
/*
 * Must be called after io_uring_{peek,wait}_cqe(), once the cqe has
 * been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head; the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}
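
/*
 * Non-blocking completion drain sketch: depending on the queue implementation,
 * io_uring_peek_cqe() may return 0 with *cqe_ptr left NULL when nothing is
 * pending, so checking both is the safe pattern before marking the cqe seen:
 *
 *	struct io_uring_cqe *cqe;
 *
 *	while (io_uring_peek_cqe(&ring, &cqe) == 0 && cqe) {
 *		... process cqe->res / io_uring_cqe_get_data(cqe) ...
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */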

/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}
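
/*
 * user_data round-trip sketch: attach a per-request context pointer to the
 * sqe and recover it from the matching cqe (struct my_req is a hypothetical
 * application type):
 *
 *	struct my_req *req = ...;
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_nop(sqe);
 *	io_uring_sqe_set_data(sqe, req);
 *	io_uring_submit(&ring);
 *
 *	io_uring_wait_cqe(&ring, &cqe);
 *	struct my_req *done = io_uring_cqe_get_data(cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 */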

static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}
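
/*
 * Fixed-buffer read sketch: the *_fixed variants assume the buffers were
 * registered up front with io_uring_register(IORING_REGISTER_BUFFERS), and
 * that sqe->buf_index selects which registered buffer to use (buf_index is
 * a field of the uapi sqe, not set by the prep helper above):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = BUF_SIZE };
 *
 *	io_uring_register(ring.ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, BUF_SIZE, 0);
 *	sqe->buf_index = 0;
 *	io_uring_submit(&ring);
 */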

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  unsigned poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
#endif
	sqe->poll_events = poll_mask;
}

static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}
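
/*
 * Poll add/remove sketch: the user_data set on the poll-add request is what
 * a later poll-remove matches against (POLLIN comes from <poll.h>; req is a
 * hypothetical application pointer):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data(sqe, req);
 *	io_uring_submit(&ring);
 *
 *	// later, cancel the pending poll by its user_data
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_remove(sqe, req);
 *	io_uring_submit(&ring);
 */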

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}
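
/*
 * Fsync sketch: pass 0 for a full fsync, or IORING_FSYNC_DATASYNC (from the
 * uapi header) for fdatasync-like behavior:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */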

static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}

#ifdef __cplusplus
}
#endif

#endif