io_uring: cleanup io_aux_cqe() API
Everybody is passing in the request, so get rid of the io_ring_ctx and explicit user_data pass-in. Both the ctx and user_data can be deduced from the request at hand.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c92fcfc2ba
commit d86eaed185
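In short, call sites shrink from passing the ctx and user_data separately to passing just the request. A minimal before/after sketch of the call shape, assuming hypothetical locals defer, res, and cflags at the call site (req->ctx and req->cqe.user_data are the fields the new helper reads internally):

/* before: the caller digs both fields out of the request itself */
io_aux_cqe(req->ctx, defer, req->cqe.user_data, res, cflags, true);

/* after: io_aux_cqe() derives ctx = req->ctx and
 * user_data = req->cqe.user_data on its own
 */
io_aux_cqe(req, defer, res, cflags, true);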
io_uring/io_uring.c
@@ -935,9 +935,11 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
 }
 
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
-		bool allow_overflow)
+bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
+		bool allow_overflow)
 {
+	struct io_ring_ctx *ctx = req->ctx;
+	u64 user_data = req->cqe.user_data;
 	struct io_uring_cqe *cqe;
 	unsigned int length;
 
io_uring/io_uring.h
@@ -47,7 +47,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
 		bool allow_overflow);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
io_uring/net.c
@@ -632,8 +632,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	}
 
 	if (!mshot_finished) {
-		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
-			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
+		if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+			       *ret, cflags | IORING_CQE_F_MORE, true)) {
 			io_recv_prep_retry(req);
 			/* Known not-empty or unknown state, retry */
 			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
@@ -1304,7 +1304,6 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
@@ -1354,8 +1353,8 @@ retry:
 
 	if (ret < 0)
 		return ret;
-	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
-		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
+	if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
+		       IORING_CQE_F_MORE, true))
 		goto retry;
 
 	return -ECANCELED;
io_uring/poll.c
@@ -300,8 +300,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			__poll_t mask = mangle_poll(req->cqe.res &
 						    req->apoll_events);
 
-			if (!io_aux_cqe(req->ctx, ts->locked, req->cqe.user_data,
-					mask, IORING_CQE_F_MORE, false)) {
+			if (!io_aux_cqe(req, ts->locked, mask,
+					IORING_CQE_F_MORE, false)) {
 				io_req_set_res(req, mask, 0);
 				return IOU_POLL_REMOVE_POLL_USE_RES;
 			}
io_uring/timeout.c
@@ -73,8 +73,8 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 
 	if (!io_timeout_finish(timeout, data)) {
 		bool filled;
-		filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME,
-				    IORING_CQE_F_MORE, false);
+		filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
+				    false);
 		if (filled) {
 			/* re-arm timer */
 			spin_lock_irq(&ctx->timeout_lock);