io_uring/uring_cmd: switch to always allocating async data
Basic conversion ensuring async_data is allocated off the prep path. Adds a
basic alloc cache as well, as passthrough IO can be quite high in rate.

Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
@@ -313,6 +313,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 			    sizeof(struct io_async_msghdr));
 	io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
 			    sizeof(struct io_async_rw));
+	io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
+			    sizeof(struct uring_cache));
 	io_futex_cache_init(ctx);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
@@ -2826,6 +2828,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+	io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free);
 	io_futex_cache_free(ctx);
 	io_destroy_buffers(ctx);
 	mutex_unlock(&ctx->uring_lock);
Reference in New Issue
Block a user