// SPDX-License-Identifier: GPL-2.0
/*
* io_uring opcode handling table
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"
#include "xattr.h"
#include "nop.h"
#include "fs.h"
#include "splice.h"
#include "sync.h"
#include "advise.h"
#include "openclose.h"
#include "uring_cmd.h"
#include "epoll.h"
#include "statx.h"
#include "net.h"
#include "msg_ring.h"
#include "timeout.h"
#include "poll.h"
#include "cancel.h"
#include "rw.h"
# include "waitid.h"
# include "futex.h"
# include "truncate.h"
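
/*
 * ->issue stub for requests that must never be issued directly. Only
 * IORING_OP_LINK_TIMEOUT points here: link timeouts are armed alongside
 * their parent request rather than issued, so reaching this is a bug.
 */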
static int io_no_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	WARN_ON_ONCE(1);
	return -ECANCELED;
}
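
/*
 * ->prep stub for opcodes whose backing subsystem is compiled out
 * (CONFIG_NET, CONFIG_EPOLL, CONFIG_FUTEX); such requests fail at prep
 * time with -EOPNOTSUPP.
 */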
static __maybe_unused int io_eopnotsupp_prep(struct io_kiocb *kiocb,
					     const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}
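
/*
 * Issue-side table, indexed by opcode: prep/issue handlers plus the
 * per-opcode flags consulted on the submission path. Must line up with
 * io_cold_defs[] below; both are checked in io_uring_optable_init().
 */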
const struct io_issue_def io_issue_defs[] = {
	[IORING_OP_NOP] = {
		.audit_skip = 1,
		.iopoll = 1,
		.prep = io_nop_prep,
		.issue = io_nop,
	},
	[IORING_OP_READV] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.plug = 1,
		.audit_skip = 1,
		.ioprio = 1,
		.iopoll = 1,
		.iopoll_queue = 1,
		.vectored = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep = io_prep_readv,
		.issue = io_read,
	},
	[IORING_OP_WRITEV] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.plug = 1,
		.audit_skip = 1,
		.ioprio = 1,
		.iopoll = 1,
		.iopoll_queue = 1,
		.vectored = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep = io_prep_writev,
		.issue = io_write,
	},
	[IORING_OP_FSYNC] = {
		.needs_file = 1,
		.audit_skip = 1,
		.prep = io_fsync_prep,
		.issue = io_fsync,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.plug = 1,
		.audit_skip = 1,
		.ioprio = 1,
		.iopoll = 1,
		.iopoll_queue = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep = io_prep_read_fixed,
		.issue = io_read,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.plug = 1,
		.audit_skip = 1,
		.ioprio = 1,
		.iopoll = 1,
		.iopoll_queue = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep = io_prep_write_fixed,
		.issue = io_write,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.audit_skip = 1,
		.prep = io_poll_add_prep,
		.issue = io_poll_add,
	},
	[IORING_OP_POLL_REMOVE] = {
		.audit_skip = 1,
		.prep = io_poll_remove_prep,
		.issue = io_poll_remove,
	},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file = 1,
		.audit_skip = 1,
		.prep = io_sfr_prep,
		.issue = io_sync_file_range,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.ioprio = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_sendmsg_prep,
		.issue = io_sendmsg,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_RECVMSG] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.ioprio = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_recvmsg_prep,
		.issue = io_recvmsg,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_TIMEOUT] = {
		.audit_skip = 1,
		.async_size = sizeof(struct io_timeout_data),
		.prep = io_timeout_prep,
		.issue = io_timeout,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
		.audit_skip = 1,
		.prep = io_timeout_remove_prep,
		.issue = io_timeout_remove,
	},
	[IORING_OP_ACCEPT] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.poll_exclusive = 1,
		.ioprio = 1,	/* used for flags */
#if defined(CONFIG_NET)
		.prep = io_accept_prep,
		.issue = io_accept,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_ASYNC_CANCEL] = {
		.audit_skip = 1,
		.prep = io_async_cancel_prep,
		.issue = io_async_cancel,
	},
	[IORING_OP_LINK_TIMEOUT] = {
		.audit_skip = 1,
		.async_size = sizeof(struct io_timeout_data),
		.prep = io_link_timeout_prep,
		.issue = io_no_issue,
	},
	[IORING_OP_CONNECT] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_connect_prep,
		.issue = io_connect,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file = 1,
		.prep = io_fallocate_prep,
		.issue = io_fallocate,
	},
	[IORING_OP_OPENAT] = {
		.prep = io_openat_prep,
		.issue = io_openat,
	},
	[IORING_OP_CLOSE] = {
		.prep = io_close_prep,
		.issue = io_close,
	},
	[IORING_OP_FILES_UPDATE] = {
		.audit_skip = 1,
		.iopoll = 1,
		.prep = io_files_update_prep,
		.issue = io_files_update,
	},
	[IORING_OP_STATX] = {
		.audit_skip = 1,
		.prep = io_statx_prep,
		.issue = io_statx,
	},
	[IORING_OP_READ] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.plug = 1,
		.audit_skip = 1,
		.ioprio = 1,
		.iopoll = 1,
		.iopoll_queue = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep = io_prep_read,
		.issue = io_read,
	},
	[IORING_OP_WRITE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.plug = 1,
		.audit_skip = 1,
		.ioprio = 1,
		.iopoll = 1,
		.iopoll_queue = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep = io_prep_write,
		.issue = io_write,
	},
	[IORING_OP_FADVISE] = {
		.needs_file = 1,
		.audit_skip = 1,
		.prep = io_fadvise_prep,
		.issue = io_fadvise,
	},
	[IORING_OP_MADVISE] = {
		.audit_skip = 1,
		.prep = io_madvise_prep,
		.issue = io_madvise,
	},
	[IORING_OP_SEND] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.audit_skip = 1,
		.ioprio = 1,
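		/*
		 * Provided buffers are supported on the send side as well;
		 * per the original commit message, this keeps data ordered
		 * when multiple sends to the same socket are in flight, as
		 * each (re)issued send picks the current head-of-ring buffer.
		 */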
		.buffer_select = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_sendmsg_prep,
		.issue = io_send,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_RECV] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.audit_skip = 1,
		.ioprio = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_recvmsg_prep,
		.issue = io_recv,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_OPENAT2] = {
		.prep = io_openat2_prep,
		.issue = io_openat2,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file = 1,
		.audit_skip = 1,
#if defined(CONFIG_EPOLL)
		.prep = io_epoll_ctl_prep,
		.issue = io_epoll_ctl,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_SPLICE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.audit_skip = 1,
		.prep = io_splice_prep,
		.issue = io_splice,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {
		.audit_skip = 1,
		.iopoll = 1,
		.prep = io_provide_buffers_prep,
		.issue = io_provide_buffers,
	},
	[IORING_OP_REMOVE_BUFFERS] = {
		.audit_skip = 1,
		.iopoll = 1,
		.prep = io_remove_buffers_prep,
		.issue = io_remove_buffers,
	},
	[IORING_OP_TEE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.audit_skip = 1,
		.prep = io_tee_prep,
		.issue = io_tee,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file = 1,
#if defined(CONFIG_NET)
		.prep = io_shutdown_prep,
		.issue = io_shutdown,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_RENAMEAT] = {
		.prep = io_renameat_prep,
		.issue = io_renameat,
	},
	[IORING_OP_UNLINKAT] = {
		.prep = io_unlinkat_prep,
		.issue = io_unlinkat,
	},
	[IORING_OP_MKDIRAT] = {
		.prep = io_mkdirat_prep,
		.issue = io_mkdirat,
	},
	[IORING_OP_SYMLINKAT] = {
		.prep = io_symlinkat_prep,
		.issue = io_symlinkat,
	},
	[IORING_OP_LINKAT] = {
		.prep = io_linkat_prep,
		.issue = io_linkat,
	},
	[IORING_OP_MSG_RING] = {
		.needs_file = 1,
		.iopoll = 1,
		.prep = io_msg_ring_prep,
		.issue = io_msg_ring,
	},
	[IORING_OP_FSETXATTR] = {
		.needs_file = 1,
		.prep = io_fsetxattr_prep,
		.issue = io_fsetxattr,
	},
	[IORING_OP_SETXATTR] = {
		.prep = io_setxattr_prep,
		.issue = io_setxattr,
	},
	[IORING_OP_FGETXATTR] = {
		.needs_file = 1,
		.prep = io_fgetxattr_prep,
		.issue = io_fgetxattr,
	},
	[IORING_OP_GETXATTR] = {
		.prep = io_getxattr_prep,
		.issue = io_getxattr,
	},
	[IORING_OP_SOCKET] = {
		.audit_skip = 1,
#if defined(CONFIG_NET)
		.prep = io_socket_prep,
		.issue = io_socket,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_URING_CMD] = {
		.needs_file = 1,
		.plug = 1,
		.iopoll = 1,
		.iopoll_queue = 1,
		.async_size = 2 * sizeof(struct io_uring_sqe),
		.prep = io_uring_cmd_prep,
		.issue = io_uring_cmd,
	},
	[IORING_OP_SEND_ZC] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.audit_skip = 1,
		.ioprio = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_send_zc_prep,
		.issue = io_send_zc,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_SENDMSG_ZC] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.ioprio = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_send_zc_prep,
		.issue = io_sendmsg_zc,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_READ_MULTISHOT] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.audit_skip = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep = io_read_mshot_prep,
		.issue = io_read_mshot,
	},
	[IORING_OP_WAITID] = {
		.async_size = sizeof(struct io_waitid_async),
		.prep = io_waitid_prep,
		.issue = io_waitid,
	},
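	/*
	 * Futex wait/wake via the futex2 interface. SQE layout, per the
	 * original commit message: addr = futex address, fd = FUTEX2_*
	 * flags, futex_flags = io_uring-specific flags (none defined yet),
	 * addr2 = futex value, addr3 = wake/wait mask.
	 */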
	[IORING_OP_FUTEX_WAIT] = {
#if defined(CONFIG_FUTEX)
		.prep = io_futex_prep,
		.issue = io_futex_wait,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_FUTEX_WAKE] = {
#if defined(CONFIG_FUTEX)
		.prep = io_futex_prep,
		.issue = io_futex_wake,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
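	/*
	 * Vectored futex wait: per the original commit message, sqe->addr
	 * points to a struct futex_waitv array and sqe->off holds its
	 * element count; addr2/addr3 are reserved, since values and flags
	 * travel in the futex_waitv entries. Completes with the index of
	 * the woken futex.
	 */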
	[IORING_OP_FUTEX_WAITV] = {
#if defined(CONFIG_FUTEX)
		.prep = io_futexv_prep,
		.issue = io_futexv_wait,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_FIXED_FD_INSTALL] = {
		.needs_file = 1,
		.prep = io_install_fixed_fd_prep,
		.issue = io_install_fixed_fd,
	},
	[IORING_OP_FTRUNCATE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.prep = io_ftruncate_prep,
		.issue = io_ftruncate,
	},
};
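
/*
 * Cold per-opcode data kept off the submission path: the printable name
 * plus optional cleanup/fail handlers. Indexed identically to
 * io_issue_defs[] above.
 */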
const struct io_cold_def io_cold_defs[] = {
	[IORING_OP_NOP] = {
		.name = "NOP",
	},
	[IORING_OP_READV] = {
		.name = "READV",
		.cleanup = io_readv_writev_cleanup,
		.fail = io_rw_fail,
	},
	[IORING_OP_WRITEV] = {
		.name = "WRITEV",
		.cleanup = io_readv_writev_cleanup,
		.fail = io_rw_fail,
	},
	[IORING_OP_FSYNC] = {
		.name = "FSYNC",
	},
	[IORING_OP_READ_FIXED] = {
		.name = "READ_FIXED",
		.fail = io_rw_fail,
	},
	[IORING_OP_WRITE_FIXED] = {
		.name = "WRITE_FIXED",
		.fail = io_rw_fail,
	},
	[IORING_OP_POLL_ADD] = {
		.name = "POLL_ADD",
	},
	[IORING_OP_POLL_REMOVE] = {
		.name = "POLL_REMOVE",
	},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.name = "SYNC_FILE_RANGE",
	},
	[IORING_OP_SENDMSG] = {
		.name = "SENDMSG",
#if defined(CONFIG_NET)
		.cleanup = io_sendmsg_recvmsg_cleanup,
		.fail = io_sendrecv_fail,
#endif
	},
	[IORING_OP_RECVMSG] = {
		.name = "RECVMSG",
#if defined(CONFIG_NET)
		.cleanup = io_sendmsg_recvmsg_cleanup,
		.fail = io_sendrecv_fail,
#endif
	},
	[IORING_OP_TIMEOUT] = {
		.name = "TIMEOUT",
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		.name = "TIMEOUT_REMOVE",
	},
	[IORING_OP_ACCEPT] = {
		.name = "ACCEPT",
	},
	[IORING_OP_ASYNC_CANCEL] = {
		.name = "ASYNC_CANCEL",
	},
	[IORING_OP_LINK_TIMEOUT] = {
		.name = "LINK_TIMEOUT",
	},
	[IORING_OP_CONNECT] = {
		.name = "CONNECT",
	},
	[IORING_OP_FALLOCATE] = {
		.name = "FALLOCATE",
	},
	[IORING_OP_OPENAT] = {
		.name = "OPENAT",
		.cleanup = io_open_cleanup,
	},
	[IORING_OP_CLOSE] = {
		.name = "CLOSE",
	},
	[IORING_OP_FILES_UPDATE] = {
		.name = "FILES_UPDATE",
	},
	[IORING_OP_STATX] = {
		.name = "STATX",
		.cleanup = io_statx_cleanup,
	},
	[IORING_OP_READ] = {
		.name = "READ",
		.fail = io_rw_fail,
	},
	[IORING_OP_WRITE] = {
		.name = "WRITE",
		.fail = io_rw_fail,
	},
	[IORING_OP_FADVISE] = {
		.name = "FADVISE",
	},
	[IORING_OP_MADVISE] = {
		.name = "MADVISE",
	},
	[IORING_OP_SEND] = {
		.name = "SEND",
#if defined(CONFIG_NET)
		.cleanup = io_sendmsg_recvmsg_cleanup,
		.fail = io_sendrecv_fail,
#endif
	},
	[IORING_OP_RECV] = {
		.name = "RECV",
#if defined(CONFIG_NET)
		.cleanup = io_sendmsg_recvmsg_cleanup,
		.fail = io_sendrecv_fail,
#endif
	},
	[IORING_OP_OPENAT2] = {
		.name = "OPENAT2",
		.cleanup = io_open_cleanup,
	},
	[IORING_OP_EPOLL_CTL] = {
		.name = "EPOLL",
	},
	[IORING_OP_SPLICE] = {
		.name = "SPLICE",
	},
	[IORING_OP_PROVIDE_BUFFERS] = {
		.name = "PROVIDE_BUFFERS",
	},
	[IORING_OP_REMOVE_BUFFERS] = {
		.name = "REMOVE_BUFFERS",
	},
	[IORING_OP_TEE] = {
		.name = "TEE",
	},
	[IORING_OP_SHUTDOWN] = {
		.name = "SHUTDOWN",
	},
	[IORING_OP_RENAMEAT] = {
		.name = "RENAMEAT",
		.cleanup = io_renameat_cleanup,
	},
	[IORING_OP_UNLINKAT] = {
		.name = "UNLINKAT",
		.cleanup = io_unlinkat_cleanup,
	},
	[IORING_OP_MKDIRAT] = {
		.name = "MKDIRAT",
		.cleanup = io_mkdirat_cleanup,
	},
	[IORING_OP_SYMLINKAT] = {
		.name = "SYMLINKAT",
		.cleanup = io_link_cleanup,
	},
	[IORING_OP_LINKAT] = {
		.name = "LINKAT",
		.cleanup = io_link_cleanup,
	},
	[IORING_OP_MSG_RING] = {
		.name = "MSG_RING",
		.cleanup = io_msg_ring_cleanup,
	},
	[IORING_OP_FSETXATTR] = {
		.name = "FSETXATTR",
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_SETXATTR] = {
		.name = "SETXATTR",
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_FGETXATTR] = {
		.name = "FGETXATTR",
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_GETXATTR] = {
		.name = "GETXATTR",
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_SOCKET] = {
		.name = "SOCKET",
	},
	[IORING_OP_URING_CMD] = {
		.name = "URING_CMD",
	},
	[IORING_OP_SEND_ZC] = {
		.name = "SEND_ZC",
#if defined(CONFIG_NET)
		.cleanup = io_send_zc_cleanup,
		.fail = io_sendrecv_fail,
#endif
	},
	[IORING_OP_SENDMSG_ZC] = {
		.name = "SENDMSG_ZC",
#if defined(CONFIG_NET)
		.cleanup = io_send_zc_cleanup,
		.fail = io_sendrecv_fail,
#endif
	},
	[IORING_OP_READ_MULTISHOT] = {
		.name = "READ_MULTISHOT",
	},
	[IORING_OP_WAITID] = {
		.name = "WAITID",
	},
	[IORING_OP_FUTEX_WAIT] = {
		.name = "FUTEX_WAIT",
	},
	[IORING_OP_FUTEX_WAKE] = {
		.name = "FUTEX_WAKE",
	},
	[IORING_OP_FUTEX_WAITV] = {
		.name = "FUTEX_WAITV",
	},
	[IORING_OP_FIXED_FD_INSTALL] = {
		.name = "FIXED_FD_INSTALL",
	},
	[IORING_OP_FTRUNCATE] = {
		.name = "FTRUNCATE",
	},
};
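
/*
 * Resolve an opcode to a printable name, e.g. for tracing and debug
 * output; out-of-range values map to "INVALID".
 */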
const char *io_uring_get_opcode(u8 opcode)
{
	if (opcode < IORING_OP_LAST)
		return io_cold_defs[opcode].name;
	return "INVALID";
}
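
/*
 * Boot-time sanity checks: both tables must cover exactly IORING_OP_LAST
 * entries, every opcode needs a ->prep handler, every supported opcode
 * needs an ->issue handler, and every entry should carry a name.
 */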
void __init io_uring_optable_init(void)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(io_cold_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(ARRAY_SIZE(io_issue_defs) != IORING_OP_LAST);

	for (i = 0; i < ARRAY_SIZE(io_issue_defs); i++) {
		BUG_ON(!io_issue_defs[i].prep);
		if (io_issue_defs[i].prep != io_eopnotsupp_prep)
			BUG_ON(!io_issue_defs[i].issue);
		WARN_ON_ONCE(!io_cold_defs[i].name);
	}
}