io_uring: export req alloc from core
We want to do request allocation out of the core io_uring code; make the allocation functions public so that other io_uring parts can use them.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/0314fedd3a02a514210ba42d4720332538c65956.1658913593.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 293402e564
commit bd1a3783dd
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -852,18 +852,13 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 	spin_unlock(&ctx->completion_lock);
 }
 
-static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
-{
-	return !ctx->submit_state.free_list.next;
-}
-
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
  * Because of that, io_alloc_req() should be called only under ->uring_lock
  * and with extra caution to not get a request that is still worked on.
  */
-static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
+__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
@@ -904,21 +899,6 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	return true;
 }
 
-static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
-{
-	if (unlikely(io_req_cache_empty(ctx)))
-		return __io_alloc_req_refill(ctx);
-	return true;
-}
-
-static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
-{
-	struct io_wq_work_node *node;
-
-	node = wq_stack_extract(&ctx->submit_state.free_list);
-	return container_of(node, struct io_kiocb, comp_list);
-}
-
 static inline void io_dismantle_req(struct io_kiocb *req)
 {
 	unsigned int flags = req->flags;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -75,6 +75,7 @@ void io_free_req(struct io_kiocb *req);
 void io_queue_next(struct io_kiocb *req);
 void __io_put_task(struct task_struct *task, int nr);
 void io_task_refs_refill(struct io_uring_task *tctx);
+bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			bool cancel_all);
@@ -280,4 +281,24 @@ static inline void io_get_task_refs(int nr)
 		io_task_refs_refill(tctx);
 }
 
+static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
+{
+	return !ctx->submit_state.free_list.next;
+}
+
+static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
+{
+	if (unlikely(io_req_cache_empty(ctx)))
+		return __io_alloc_req_refill(ctx);
+	return true;
+}
+
+static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+{
+	struct io_wq_work_node *node;
+
+	node = wq_stack_extract(&ctx->submit_state.free_list);
+	return container_of(node, struct io_kiocb, comp_list);
+}
+
 #endif
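For illustration, a minimal sketch (not part of this commit) of how another io_uring component could use the now-exported helpers. io_get_fresh_req() is a hypothetical name invented here; the ->uring_lock requirement comes from the comment above __io_alloc_req_refill().

/*
 * Hypothetical caller sketch, assuming code that includes io_uring.h.
 * io_get_fresh_req() is an invented helper, not added by this commit.
 */
static struct io_kiocb *io_get_fresh_req(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	/* Top up the per-ctx free list from the slab cache if it ran empty. */
	if (unlikely(!io_alloc_req_refill(ctx)))
		return NULL;
	/* Pop one cached request; safe only while holding ->uring_lock. */
	return io_alloc_req(ctx);
}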