io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups
Pass in EPOLL_URING_WAKE when signaling eventfd or doing poll related wakeups, so that we can check for a circular event dependency between eventfd and epoll. If this flag is set when our wakeup handlers are called, then we know we have a dependency that needs to terminate multishot requests. eventfd and epoll are the only such possible dependencies. Cc: stable@vger.kernel.org # 6.0 Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
03e02acda8
commit
4464853277
@ -495,7 +495,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
|
|||||||
int ops = atomic_xchg(&ev_fd->ops, 0);
|
int ops = atomic_xchg(&ev_fd->ops, 0);
|
||||||
|
|
||||||
if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
|
if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
|
||||||
eventfd_signal(ev_fd->cq_ev_fd, 1);
|
eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
|
||||||
|
|
||||||
/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
|
/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
|
||||||
* ordering in a race but if references are 0 we know we have to free
|
* ordering in a race but if references are 0 we know we have to free
|
||||||
@ -531,7 +531,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
|
|||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (likely(eventfd_signal_allowed())) {
|
if (likely(eventfd_signal_allowed())) {
|
||||||
eventfd_signal(ev_fd->cq_ev_fd, 1);
|
eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
|
||||||
} else {
|
} else {
|
||||||
atomic_inc(&ev_fd->refs);
|
atomic_inc(&ev_fd->refs);
|
||||||
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
|
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
|
||||||
|
@ -4,6 +4,7 @@
|
|||||||
#include <linux/errno.h>
|
#include <linux/errno.h>
|
||||||
#include <linux/lockdep.h>
|
#include <linux/lockdep.h>
|
||||||
#include <linux/io_uring_types.h>
|
#include <linux/io_uring_types.h>
|
||||||
|
#include <uapi/linux/eventpoll.h>
|
||||||
#include "io-wq.h"
|
#include "io-wq.h"
|
||||||
#include "slist.h"
|
#include "slist.h"
|
||||||
#include "filetable.h"
|
#include "filetable.h"
|
||||||
@ -211,12 +212,18 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
|
|||||||
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
|
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* wake_up_all() may seem excessive, but io_wake_function() and
|
* Trigger waitqueue handler on all waiters on our waitqueue. This
|
||||||
* io_should_wake() handle the termination of the loop and only
|
* won't necessarily wake up all the tasks, io_should_wake() will make
|
||||||
* wake as many waiters as we need to.
|
* that decision.
|
||||||
|
*
|
||||||
|
* Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
|
||||||
|
* set in the mask so that if we recurse back into our own poll
|
||||||
|
* waitqueue handlers, we know we have a dependency between eventfd or
|
||||||
|
* epoll and should terminate multishot poll at that point.
|
||||||
*/
|
*/
|
||||||
if (waitqueue_active(&ctx->cq_wait))
|
if (waitqueue_active(&ctx->cq_wait))
|
||||||
wake_up_all(&ctx->cq_wait);
|
__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
|
||||||
|
poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void io_cqring_wake(struct io_ring_ctx *ctx)
|
static inline void io_cqring_wake(struct io_ring_ctx *ctx)
|
||||||
|
@ -389,6 +389,14 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (io_poll_get_ownership(req)) {
|
if (io_poll_get_ownership(req)) {
|
||||||
|
/*
|
||||||
|
* If we trigger a multishot poll off our own wakeup path,
|
||||||
|
* disable multishot as there is a circular dependency between
|
||||||
|
* CQ posting and triggering the event.
|
||||||
|
*/
|
||||||
|
if (mask & EPOLL_URING_WAKE)
|
||||||
|
poll->events |= EPOLLONESHOT;
|
||||||
|
|
||||||
/* optional, saves extra locking for removal in tw handler */
|
/* optional, saves extra locking for removal in tw handler */
|
||||||
if (mask && poll->events & EPOLLONESHOT) {
|
if (mask && poll->events & EPOLLONESHOT) {
|
||||||
list_del_init(&poll->wait.entry);
|
list_del_init(&poll->wait.entry);
|
||||||
|
Loading…
x
Reference in New Issue
Block a user