io_uring: use constants for cq_overflow bitfield
Prepare to use this bitfield for more flags by using constants instead of
magic value 0.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220421091345.2115755-5-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 10988a0a67
parent 3e813c9026
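The patch replaces the open-coded bit 0 with a named enum constant so that future flags can share the renamed check_cq word. Below is a minimal userspace sketch of that pattern, not kernel code: the kernel's atomic set_bit()/clear_bit()/test_bit() are stubbed with plain bit arithmetic, and IO_CHECK_CQ_EXAMPLE_BIT is a hypothetical second flag (not part of this patch) added only to show why named bits scale better than a magic 0. The diff itself follows.

#include <stdio.h>

/* Bit numbers within the check_cq word, mirroring the enum this patch adds. */
enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_EXAMPLE_BIT,	/* hypothetical future flag, for illustration only */
};

/* Simplified, non-atomic stand-ins for the kernel's bitops. */
static void set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static void clear_bit(int nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1);
}

int main(void)
{
	unsigned long check_cq = 0;

	/* A CQE overflows: mark the bit, as io_cqring_event_overflow() now does. */
	set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq);
	printf("overflow pending: %d\n",
	       test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq));

	/* Overflow list flushed: clear it, as __io_cqring_overflow_flush() now does. */
	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq);
	printf("overflow pending: %d\n",
	       test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq));
	return 0;
}

With named bits, adding a second flag is a one-line enum change and every call site stays self-describing.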
@@ -431,7 +431,7 @@ struct io_ring_ctx {
 	struct wait_queue_head	sqo_sq_wait;
 	struct list_head	sqd_list;
 
-	unsigned long		check_cq_overflow;
+	unsigned long		check_cq;
 
 	struct {
 		/*
@@ -903,6 +903,10 @@ struct io_cqe {
 	};
 };
 
+enum {
+	IO_CHECK_CQ_OVERFLOW_BIT,
+};
+
 /*
  * NOTE! Each of the iocb union members has the file pointer
  * as the first entry in their struct definition. So you can
@@ -2024,7 +2028,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 
 	all_flushed = list_empty(&ctx->cq_overflow_list);
 	if (all_flushed) {
-		clear_bit(0, &ctx->check_cq_overflow);
+		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
 		WRITE_ONCE(ctx->rings->sq_flags,
 			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
 	}
@@ -2040,7 +2044,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
 	bool ret = true;
 
-	if (test_bit(0, &ctx->check_cq_overflow)) {
+	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
 		/* iopoll syncs against uring_lock, not completion_lock */
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&ctx->uring_lock);
@@ -2118,7 +2122,7 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 		return false;
 	}
 	if (list_empty(&ctx->cq_overflow_list)) {
-		set_bit(0, &ctx->check_cq_overflow);
+		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
 		WRITE_ONCE(ctx->rings->sq_flags,
 			   ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
 
@@ -2960,7 +2964,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 * If we do, we can potentially be spinning for commands that
 		 * already triggered a CQE (eg in error).
 		 */
-		if (test_bit(0, &ctx->check_cq_overflow))
+		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
 			__io_cqring_overflow_flush(ctx, false);
 		if (io_cqring_events(ctx))
 			return 0;
@@ -8300,7 +8304,8 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
 	 * the task, and the next invocation will do it.
 	 */
-	if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
+	if (io_should_wake(iowq) ||
+	    test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
 		return autoremove_wake_function(curr, mode, wake_flags, key);
 	return -1;
 }
@@ -8328,7 +8333,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	if (ret || io_should_wake(iowq))
 		return ret;
 	/* let the caller flush overflows, retry */
-	if (test_bit(0, &ctx->check_cq_overflow))
+	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
 		return 1;
 
 	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
@@ -10123,7 +10128,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
 	 * pushs them to do the flush.
 	 */
-	if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
+	if (io_cqring_events(ctx) ||
+	    test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	return mask;