c16bda3759
If we get woken spuriously when polling and fail the operation with -EAGAIN again, then we generally only allow polling again if data had been transferred at some point. This is indicated with REQ_F_PARTIAL_IO. However, if the spurious poll triggers when the socket was originally empty, then we haven't transferred data yet and we will fail the poll re-arm. This either punts the socket to io-wq if it's blocking, or it fails the request with -EAGAIN if not. Neither condition is desirable, as the former will slow things down, while the latter will make the application confused. We want to ensure that a repeated poll trigger doesn't lead to infinite work making no progress; that's what the REQ_F_PARTIAL_IO check was for. But it doesn't protect against a loop post the first receive, and it's unnecessarily strict if we started out with an empty socket. Add a somewhat random retry count, just to put an upper limit on the potential number of retries that will be done. This should be high enough that we won't really hit it in practice, unless something needs to be aborted anyway. Cc: stable@vger.kernel.org # v5.10+ Link: https://github.com/axboe/liburing/issues/364 Signed-off-by: Jens Axboe <axboe@kernel.dk>
41 lines
1023 B
C
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
#include "alloc_cache.h"
|
|
|
|
/*
 * Result codes for an async-poll arm attempt (presumably returned by
 * io_arm_poll_handler() below — confirm against poll.c).
 */
enum {
	IO_APOLL_OK	 = 0,	/* poll was armed successfully */
	IO_APOLL_ABORTED = 1,	/* arming failed / was abandoned */
	IO_APOLL_READY	 = 2,	/* request is already ready, no poll needed */
};
|
|
|
|
/*
 * Per-request poll state: ties a request's file to the wait queue it is
 * polling on, plus the event mask and retry accounting.
 */
struct io_poll {
	/* file being polled */
	struct file *file;
	/* wait queue head 'wait' is (or will be) registered on */
	struct wait_queue_head *head;
	/* poll event mask (EPOLL*) this request is interested in */
	__poll_t events;
	/*
	 * Number of poll re-arm attempts so far; caps spurious-wakeup retry
	 * loops so a repeated trigger cannot make endless no-progress work
	 * (see commit message above).
	 */
	int retries;
	/* entry linked onto 'head'; its callback wakes the request */
	struct wait_queue_entry wait;
};
|
|
|
|
/*
 * Async-poll bookkeeping for a request. The primary poll state shares
 * storage with the cache entry: while in use it is an io_poll, and when
 * recycled it is linked into the apoll cache (see io_apoll_cache_free()).
 */
struct async_poll {
	union {
		/* active poll state */
		struct io_poll poll;
		/* cache linkage when the entry is recycled */
		struct io_cache_entry cache;
	};
	/* second-level poll entry for double-poll setups, NULL if unused */
	struct io_poll *double_poll;
};
|
|
|
|
/* Prep/issue handlers for the IORING_OP_POLL_ADD opcode. */
int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);

/* Prep/issue handlers for the IORING_OP_POLL_REMOVE opcode. */
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);

struct io_cancel_data;
/* Cancel a matching pending poll request on @ctx. */
int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags);
/* Arm async poll for @req; returns an IO_APOLL_* result code. */
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
/*
 * Remove pending poll requests belonging to @tsk (all requests if
 * @cancel_all); returns true if any were found and removed.
 */
bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			bool cancel_all);

/* Free a recycled async_poll entry handed back by the alloc cache. */
void io_apoll_cache_free(struct io_cache_entry *entry);
|