io_uring: remove the passed-in 'ctx' function parameter where possible
In many of these functions the object being operated on is req, and req->ctx has already been set at initialization time, so there is no need for the caller to pass in ctx separately. Cleanup; no functional change. Signed-off-by: Jackie Liu <liuyun01@kylinos.cn> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
206aefde4f
commit
a197f664a0
108
fs/io_uring.c
108
fs/io_uring.c
@@ -438,20 +438,20 @@ err:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
|
||||
struct io_kiocb *req)
|
||||
static inline bool __io_sequence_defer(struct io_kiocb *req)
|
||||
{
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
|
||||
+ atomic_read(&ctx->cached_cq_overflow);
|
||||
}
|
||||
|
||||
static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
|
||||
struct io_kiocb *req)
|
||||
static inline bool io_sequence_defer(struct io_kiocb *req)
|
||||
{
|
||||
if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
|
||||
return false;
|
||||
|
||||
return __io_sequence_defer(ctx, req);
|
||||
return __io_sequence_defer(req);
|
||||
}
|
||||
|
||||
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
|
||||
@@ -459,7 +459,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
|
||||
struct io_kiocb *req;
|
||||
|
||||
req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
|
||||
if (req && !io_sequence_defer(ctx, req)) {
|
||||
if (req && !io_sequence_defer(req)) {
|
||||
list_del_init(&req->list);
|
||||
return req;
|
||||
}
|
||||
@@ -472,7 +472,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
|
||||
struct io_kiocb *req;
|
||||
|
||||
req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
|
||||
if (req && !__io_sequence_defer(ctx, req)) {
|
||||
if (req && !__io_sequence_defer(req)) {
|
||||
list_del_init(&req->list);
|
||||
return req;
|
||||
}
|
||||
@@ -535,10 +535,10 @@ static inline bool io_prep_async_work(struct io_kiocb *req)
|
||||
return do_hashed;
|
||||
}
|
||||
|
||||
static inline void io_queue_async_work(struct io_ring_ctx *ctx,
|
||||
struct io_kiocb *req)
|
||||
static inline void io_queue_async_work(struct io_kiocb *req)
|
||||
{
|
||||
bool do_hashed = io_prep_async_work(req);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
|
||||
req->flags);
|
||||
@@ -589,7 +589,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
|
||||
continue;
|
||||
}
|
||||
req->flags |= REQ_F_IO_DRAINED;
|
||||
io_queue_async_work(ctx, req);
|
||||
io_queue_async_work(req);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -792,9 +792,9 @@ static void __io_free_req(struct io_kiocb *req)
|
||||
kmem_cache_free(req_cachep, req);
|
||||
}
|
||||
|
||||
static bool io_link_cancel_timeout(struct io_ring_ctx *ctx,
|
||||
struct io_kiocb *req)
|
||||
static bool io_link_cancel_timeout(struct io_kiocb *req)
|
||||
{
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
int ret;
|
||||
|
||||
ret = hrtimer_try_to_cancel(&req->timeout.timer);
|
||||
@@ -834,7 +834,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
|
||||
* in this context instead of having to queue up new async work.
|
||||
*/
|
||||
if (req->flags & REQ_F_LINK_TIMEOUT) {
|
||||
wake_ev = io_link_cancel_timeout(ctx, nxt);
|
||||
wake_ev = io_link_cancel_timeout(nxt);
|
||||
|
||||
/* we dropped this link, get next */
|
||||
nxt = list_first_entry_or_null(&req->link_list,
|
||||
@@ -843,7 +843,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
|
||||
*nxtptr = nxt;
|
||||
break;
|
||||
} else {
|
||||
io_queue_async_work(req->ctx, nxt);
|
||||
io_queue_async_work(nxt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -871,7 +871,7 @@ static void io_fail_links(struct io_kiocb *req)
|
||||
|
||||
if ((req->flags & REQ_F_LINK_TIMEOUT) &&
|
||||
link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
|
||||
io_link_cancel_timeout(ctx, link);
|
||||
io_link_cancel_timeout(link);
|
||||
} else {
|
||||
io_cqring_fill_event(link, -ECANCELED);
|
||||
io_double_put_req(link);
|
||||
@@ -940,7 +940,7 @@ static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
|
||||
if (nxtptr)
|
||||
*nxtptr = nxt;
|
||||
else
|
||||
io_queue_async_work(nxt->ctx, nxt);
|
||||
io_queue_async_work(nxt);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1899,7 +1899,7 @@ static void io_poll_remove_one(struct io_kiocb *req)
|
||||
WRITE_ONCE(poll->canceled, true);
|
||||
if (!list_empty(&poll->wait.entry)) {
|
||||
list_del_init(&poll->wait.entry);
|
||||
io_queue_async_work(req->ctx, req);
|
||||
io_queue_async_work(req);
|
||||
}
|
||||
spin_unlock(&poll->head->lock);
|
||||
|
||||
@@ -1951,9 +1951,10 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
__poll_t mask)
|
||||
static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
|
||||
{
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
req->poll.done = true;
|
||||
io_cqring_fill_event(req, mangle_poll(mask));
|
||||
io_commit_cqring(ctx);
|
||||
@@ -1989,7 +1990,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
|
||||
return;
|
||||
}
|
||||
list_del_init(&req->list);
|
||||
io_poll_complete(ctx, req, mask);
|
||||
io_poll_complete(req, mask);
|
||||
spin_unlock_irq(&ctx->completion_lock);
|
||||
|
||||
io_cqring_ev_posted(ctx);
|
||||
@@ -2017,13 +2018,13 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
|
||||
|
||||
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
|
||||
list_del(&req->list);
|
||||
io_poll_complete(ctx, req, mask);
|
||||
io_poll_complete(req, mask);
|
||||
spin_unlock_irqrestore(&ctx->completion_lock, flags);
|
||||
|
||||
io_cqring_ev_posted(ctx);
|
||||
io_put_req(req, NULL);
|
||||
} else {
|
||||
io_queue_async_work(ctx, req);
|
||||
io_queue_async_work(req);
|
||||
}
|
||||
|
||||
return 1;
|
||||
@@ -2108,7 +2109,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
}
|
||||
if (mask) { /* no async, we'd stolen it */
|
||||
ipt.error = 0;
|
||||
io_poll_complete(ctx, req, mask);
|
||||
io_poll_complete(req, mask);
|
||||
}
|
||||
spin_unlock_irq(&ctx->completion_lock);
|
||||
|
||||
@@ -2355,12 +2356,13 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
static int io_req_defer(struct io_kiocb *req)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->submit.sqe;
|
||||
struct io_uring_sqe *sqe_copy;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
|
||||
if (!io_sequence_defer(req) && list_empty(&ctx->defer_list))
|
||||
return 0;
|
||||
|
||||
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
|
||||
@@ -2368,7 +2370,7 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
return -EAGAIN;
|
||||
|
||||
spin_lock_irq(&ctx->completion_lock);
|
||||
if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
|
||||
if (!io_sequence_defer(req) && list_empty(&ctx->defer_list)) {
|
||||
spin_unlock_irq(&ctx->completion_lock);
|
||||
kfree(sqe_copy);
|
||||
return 0;
|
||||
@@ -2383,11 +2385,12 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
return -EIOCBQUEUED;
|
||||
}
|
||||
|
||||
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
struct io_kiocb **nxt, bool force_nonblock)
|
||||
static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
bool force_nonblock)
|
||||
{
|
||||
int ret, opcode;
|
||||
struct sqe_submit *s = &req->submit;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
opcode = READ_ONCE(s->sqe->opcode);
|
||||
switch (opcode) {
|
||||
@@ -2467,7 +2470,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
|
||||
{
|
||||
struct io_wq_work *work = *workptr;
|
||||
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct sqe_submit *s = &req->submit;
|
||||
const struct io_uring_sqe *sqe = s->sqe;
|
||||
struct io_kiocb *nxt = NULL;
|
||||
@@ -2483,7 +2485,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
|
||||
s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
|
||||
s->in_async = true;
|
||||
do {
|
||||
ret = __io_submit_sqe(ctx, req, &nxt, false);
|
||||
ret = __io_submit_sqe(req, &nxt, false);
|
||||
/*
|
||||
* We can get EAGAIN for polled IO even though we're
|
||||
* forcing a sync submission from here, since we can't
|
||||
@@ -2537,10 +2539,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
|
||||
return table->files[index & IORING_FILE_TABLE_MASK];
|
||||
}
|
||||
|
||||
static int io_req_set_file(struct io_ring_ctx *ctx,
|
||||
struct io_submit_state *state, struct io_kiocb *req)
|
||||
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
|
||||
{
|
||||
struct sqe_submit *s = &req->submit;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
unsigned flags;
|
||||
int fd;
|
||||
|
||||
@@ -2580,9 +2582,10 @@ static int io_req_set_file(struct io_ring_ctx *ctx,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
static int io_grab_files(struct io_kiocb *req)
|
||||
{
|
||||
int ret = -EBADF;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock_irq(&ctx->inflight_lock);
|
||||
@@ -2698,7 +2701,7 @@ static inline struct io_kiocb *io_get_linked_timeout(struct io_kiocb *req)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
static int __io_queue_sqe(struct io_kiocb *req)
|
||||
{
|
||||
struct io_kiocb *nxt;
|
||||
int ret;
|
||||
@@ -2710,7 +2713,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = __io_submit_sqe(ctx, req, NULL, true);
|
||||
ret = __io_submit_sqe(req, NULL, true);
|
||||
|
||||
/*
|
||||
* We async punt it if the file wasn't marked NOWAIT, or if the file
|
||||
@@ -2725,7 +2728,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
if (sqe_copy) {
|
||||
s->sqe = sqe_copy;
|
||||
if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
|
||||
ret = io_grab_files(ctx, req);
|
||||
ret = io_grab_files(req);
|
||||
if (ret) {
|
||||
kfree(sqe_copy);
|
||||
goto err;
|
||||
@@ -2736,7 +2739,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
* Queued up for async execution, worker will release
|
||||
* submit reference when the iocb is actually submitted.
|
||||
*/
|
||||
io_queue_async_work(ctx, req);
|
||||
io_queue_async_work(req);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@@ -2756,11 +2759,11 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
static int io_queue_sqe(struct io_kiocb *req)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = io_req_defer(ctx, req);
|
||||
ret = io_req_defer(req);
|
||||
if (ret) {
|
||||
if (ret != -EIOCBQUEUED) {
|
||||
io_cqring_add_event(req, ret);
|
||||
@@ -2769,17 +2772,17 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
|
||||
return 0;
|
||||
}
|
||||
|
||||
return __io_queue_sqe(ctx, req);
|
||||
return __io_queue_sqe(req);
|
||||
}
|
||||
|
||||
static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
struct io_kiocb *shadow)
|
||||
static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
|
||||
{
|
||||
int ret;
|
||||
int need_submit = false;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
if (!shadow)
|
||||
return io_queue_sqe(ctx, req);
|
||||
return io_queue_sqe(req);
|
||||
|
||||
/*
|
||||
* Mark the first IO in link list as DRAIN, let all the following
|
||||
@@ -2787,7 +2790,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
* list.
|
||||
*/
|
||||
req->flags |= REQ_F_IO_DRAIN;
|
||||
ret = io_req_defer(ctx, req);
|
||||
ret = io_req_defer(req);
|
||||
if (ret) {
|
||||
if (ret != -EIOCBQUEUED) {
|
||||
io_cqring_add_event(req, ret);
|
||||
@@ -2810,18 +2813,19 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
spin_unlock_irq(&ctx->completion_lock);
|
||||
|
||||
if (need_submit)
|
||||
return __io_queue_sqe(ctx, req);
|
||||
return __io_queue_sqe(req);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
|
||||
|
||||
static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
struct io_submit_state *state, struct io_kiocb **link)
|
||||
static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
|
||||
struct io_kiocb **link)
|
||||
{
|
||||
struct io_uring_sqe *sqe_copy;
|
||||
struct sqe_submit *s = &req->submit;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
int ret;
|
||||
|
||||
req->user_data = s->sqe->user_data;
|
||||
@@ -2832,7 +2836,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
goto err_req;
|
||||
}
|
||||
|
||||
ret = io_req_set_file(ctx, state, req);
|
||||
ret = io_req_set_file(state, req);
|
||||
if (unlikely(ret)) {
|
||||
err_req:
|
||||
io_cqring_add_event(req, ret);
|
||||
@@ -2869,7 +2873,7 @@ err_req:
|
||||
ret = -EINVAL;
|
||||
goto err_req;
|
||||
} else {
|
||||
io_queue_sqe(ctx, req);
|
||||
io_queue_sqe(req);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3018,7 +3022,7 @@ out:
|
||||
req->submit.needs_fixed_file = async;
|
||||
trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
|
||||
true, async);
|
||||
io_submit_sqe(ctx, req, statep, &link);
|
||||
io_submit_sqe(req, statep, &link);
|
||||
submitted++;
|
||||
|
||||
/*
|
||||
@@ -3026,14 +3030,14 @@ out:
|
||||
* that's the end of the chain. Submit the previous link.
|
||||
*/
|
||||
if (!(sqe_flags & IOSQE_IO_LINK) && link) {
|
||||
io_queue_link_head(ctx, link, shadow_req);
|
||||
io_queue_link_head(link, shadow_req);
|
||||
link = NULL;
|
||||
shadow_req = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (link)
|
||||
io_queue_link_head(ctx, link, shadow_req);
|
||||
io_queue_link_head(link, shadow_req);
|
||||
if (statep)
|
||||
io_submit_state_end(&state);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user