// SPDX-License-Identifier: GPL-2.0

#include <linux/net.h>
#include <linux/uio.h>

#include "alloc_cache.h"
/*
 * Per-request async state for sendmsg/recvmsg-style requests. Kept
 * alive across request suspension so user iovecs/addresses survive.
 */
struct io_async_msghdr {
#if defined(CONFIG_NET)
	union {
		/* inline iovec storage for the common small-iov case */
		struct iovec		fast_iov[UIO_FASTIOV];
		struct {
			/* single-iov variant plus saved msghdr lengths,
			 * used when only one iovec is needed */
			struct iovec	fast_iov_one;
			__kernel_size_t	controllen;
			int		namelen;
			__kernel_size_t	payloadlen;
		};
		/* linkage for the alloc_cache free-list when recycled */
		struct io_cache_entry	cache;
	};
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
#endif
};
#if defined(CONFIG_NET)
/* Saved connect(2) target address for a deferred connect request. */
struct io_async_connect {
struct sockaddr_storage address ;
} ;
/* shutdown(2) */
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);

/* sendmsg(2)/send(2); *_prep_async set up io_async_msghdr state */
int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);

int io_send(struct io_kiocb *req, unsigned int issue_flags);
int io_send_prep_async(struct io_kiocb *req);

/* recvmsg(2)/recv(2) */
int io_recvmsg_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);

/* common failure path for send/recv request types */
void io_sendrecv_fail(struct io_kiocb *req);

/* accept(2) */
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags);

/* socket(2) */
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);

/* connect(2) */
int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);

/* zero-copy send variants */
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);

/* release a cached io_async_msghdr back to the allocator */
void io_netmsg_cache_free(struct io_cache_entry *entry);
# else
/* With CONFIG_NET=n no msghdr state is ever cached, so freeing is a no-op. */
static inline void io_netmsg_cache_free ( struct io_cache_entry * entry )
{
}
#endif