io_uring: optimize io_uring_task layout
The task_work bits of io_uring_task are split across two cache lines, causing extra cache bouncing; place them in a separate cache line. Also move the most used submission path fields closer together, so they stay hot.

Cc: stable@vger.kernel.org # 5.15+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 4a0fef6278
parent bce5d70cd6
@@ -7,22 +7,24 @@
 
 struct io_uring_task {
 	/* submission side */
-	int			cached_refs;
-	struct xarray		xa;
-	struct wait_queue_head	wait;
-	const struct io_ring_ctx *last;
-	struct io_wq		*io_wq;
-	struct percpu_counter	inflight;
-	atomic_t		inflight_tracked;
-	atomic_t		in_idle;
-
-	spinlock_t		task_lock;
-	struct io_wq_work_list	task_list;
-	struct io_wq_work_list	prio_task_list;
-	struct callback_head	task_work;
-	bool			task_running;
-
-	struct file		*registered_rings[IO_RINGFD_REG_MAX];
+	int				cached_refs;
+	const struct io_ring_ctx	*last;
+	struct io_wq			*io_wq;
+	struct file			*registered_rings[IO_RINGFD_REG_MAX];
+
+	struct xarray			xa;
+	struct wait_queue_head		wait;
+	atomic_t			in_idle;
+	atomic_t			inflight_tracked;
+	struct percpu_counter		inflight;
+
+	struct { /* task_work */
+		spinlock_t		task_lock;
+		bool			task_running;
+		struct io_wq_work_list	task_list;
+		struct io_wq_work_list	prio_task_list;
+		struct callback_head	task_work;
+	} ____cacheline_aligned_in_smp;
 };
 
 struct io_tctx_node {
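Editor's note: the following is a minimal userspace sketch, not part of the commit, illustrating the technique applied above. On SMP builds ____cacheline_aligned_in_smp boils down to an alignment attribute, so wrapping the task_work fields in an aligned anonymous struct forces that group to start on its own cache line instead of straddling two. The struct and field names, the fixed 64-byte line size, and the plain __attribute__((aligned(...))) below are illustrative assumptions, not the kernel's definitions.

/* Sketch of the cache-line grouping trick used by the patch. */
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE 64	/* assumed cache line size for this sketch */

struct tctx_like {
	/* hot submission-path fields, touched by the submitting task */
	int		cached_refs;
	const void	*last;
	void		*io_wq;

	/* task_work state, also written from other CPUs: own cache line */
	struct {
		int	lock;
		_Bool	running;
		void	*list;
	} __attribute__((aligned(CACHE_LINE))) tw;
};

/* The aligned group always starts on a cache-line boundary. */
_Static_assert(offsetof(struct tctx_like, tw) % CACHE_LINE == 0,
	       "task_work group must start on its own cache line");

int main(void)
{
	printf("sizeof=%zu, tw group at offset %zu\n",
	       sizeof(struct tctx_like), offsetof(struct tctx_like, tw));
	return 0;
}

Built with gcc -std=c11, the static assert holds no matter which fields precede the group, which is the layout guarantee the patch relies on to keep task_work traffic off the hot submission-side cache line.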