// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow to post CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
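
/*
 * Retry a partial transfer only when MSG_WAITALL was requested and the
 * socket is stream-like (SOCK_STREAM or SOCK_SEQPACKET).
 */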
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
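
/*
 * Return the async msghdr to the per-ring netmsg cache when the ring lock is
 * held; otherwise leave it for the normal cleanup path to free.
 */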
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
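
/*
 * Grab an io_async_msghdr, preferably from the ring's netmsg cache when the
 * ring lock is held, falling back to a fresh async_data allocation.
 */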
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}
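
/*
 * Preserve the on-stack msghdr in async data so the request can be retried
 * later (e.g. from io-wq). Returns -EAGAIN on success to trigger that retry.
 */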
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}
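
/*
 * For send variants that carry a destination address, copy it into the async
 * data up front so it is available when the request is issued out of line.
 */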
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}
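
/*
 * Like io_setup_async_msg(), but only the destination address needs to be
 * preserved across a retry for the non-msghdr send variants.
 */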
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
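
/*
 * Check that the multishot recvmsg header (io_uring_recvmsg_out + name +
 * control data) fits in an int without overflowing.
 */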
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			return false;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}
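
/*
 * Carve the selected buffer into the io_uring_recvmsg_out header, control
 * data area and payload for a multishot recvmsg.
 */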
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
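
/*
 * Do a single receive and lay out io_uring_recvmsg_out, the source address
 * and any control data in front of the payload in the provided buffer.
 */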
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC | MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}
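
/*
 * Cleanup for zerocopy sends: free a copied iovec if one was allocated and
 * flush any still-pending notification.
 */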
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
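
/*
 * Fill skb frags directly from the bvec iterator for managed-frag zerocopy,
 * falling back to __zerocopy_sg_from_iter() when the skb can't take managed
 * frags.
 */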
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
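
/*
 * On failure, report any partial progress as the result, and keep
 * IORING_CQE_F_MORE set for zerocopy sends that still owe a notification CQE.
 */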
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else {
			if (req_has_async_data(req))
				return -EAGAIN;
			if (io_alloc_async_data(req)) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(req->async_data, &__io, sizeof(__io));
		}
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif