blk-throttle: correct calculation of wait time in tg_may_dispatch
In C, when evaluating "if (expression1 && expression2)", expression2 is not executed if expression1 evaluates to false. For "tg_within_bps_limit(tg, bio, bps_limit, &bps_wait) && tg_within_iops_limit(tg, bio, iops_limit, &iops_wait)", if bps is limited, tg_within_bps_limit returns false and tg_within_iops_limit is never called. So even when both bps and iops are limited, iops_wait is never calculated and stays zero, and the iops wait time is always ignored.

Fix this by always calling tg_within_bps_limit and tg_within_iops_limit to get the wait time for both bps and iops.

Observed that:
1. The wait time in tg_within_iops_limit/tg_within_bps_limit always needs to be stored, as the wait argument is always passed.
2. The wait time is set to zero if the bio is within the iops/bps limit, and to a non-zero value otherwise.

Simplify tg_within_iops_limit/tg_within_bps_limit by removing the wait argument and returning the wait time directly. The caller tg_may_dispatch checks whether the wait time is zero to determine whether the bio is within the iops/bps limit.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
Link: https://lore.kernel.org/r/20221205115709.251489-5-shikemeng@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
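For illustration only, here is a minimal, standalone C sketch of the short-circuit behavior the commit message describes. The helpers within_bps_limit()/within_iops_limit() and the wait values 10/20 are hypothetical stand-ins for the kernel functions, not code from the patch:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: reports "over the bps limit" and stores a wait time. */
static bool within_bps_limit(unsigned long *wait)
{
	*wait = 10;	/* pretend bps requires a 10-jiffy wait */
	return false;	/* over the bps limit */
}

/* Hypothetical stand-in: reports "over the iops limit" and stores a wait time. */
static bool within_iops_limit(unsigned long *wait)
{
	*wait = 20;	/* pretend iops requires a 20-jiffy wait */
	return false;	/* over the iops limit */
}

int main(void)
{
	unsigned long bps_wait = 0, iops_wait = 0;

	/* Short-circuit: within_bps_limit() returns false, so
	 * within_iops_limit() is never called and iops_wait stays 0. */
	if (within_bps_limit(&bps_wait) && within_iops_limit(&iops_wait))
		printf("dispatch allowed\n");

	/* Prints "bps_wait=10 iops_wait=0": the iops wait time is lost,
	 * which is the problem the patch below addresses. */
	printf("bps_wait=%lu iops_wait=%lu\n", bps_wait, iops_wait);
	return 0;
}

The patch avoids this by calling both limit checks unconditionally and only then testing whether either returned a non-zero wait time.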
block/blk-throttle.c
@@ -822,17 +822,15 @@ static void tg_update_carryover(struct throtl_grp *tg)
 		   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
 }
 
-static bool tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
-				 u32 iops_limit, unsigned long *wait)
+static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
+				 u32 iops_limit)
 {
 	bool rw = bio_data_dir(bio);
 	unsigned int io_allowed;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 
 	if (iops_limit == UINT_MAX) {
-		if (wait)
-			*wait = 0;
-		return true;
+		return 0;
 	}
 
 	jiffy_elapsed = jiffies - tg->slice_start[rw];
@@ -842,21 +840,16 @@ static bool tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
 	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
 		     tg->carryover_ios[rw];
 	if (tg->io_disp[rw] + 1 <= io_allowed) {
-		if (wait)
-			*wait = 0;
-		return true;
+		return 0;
 	}
 
 	/* Calc approx time to dispatch */
 	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
-
-	if (wait)
-		*wait = jiffy_wait;
-	return false;
+	return jiffy_wait;
 }
 
-static bool tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
-				u64 bps_limit, unsigned long *wait)
+static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
+				u64 bps_limit)
 {
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes;
@@ -865,9 +858,7 @@ static bool tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 
 	/* no need to throttle if this bio's bytes have been accounted */
 	if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
-		if (wait)
-			*wait = 0;
-		return true;
+		return 0;
 	}
 
 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@@ -880,9 +871,7 @@ static bool tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
 			tg->carryover_bytes[rw];
 	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
-		if (wait)
-			*wait = 0;
-		return true;
+		return 0;
 	}
 
 	/* Calc approx time to dispatch */
@@ -897,9 +886,7 @@ static bool tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	 * up we did. Add that time also.
 	 */
 	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
-	if (wait)
-		*wait = jiffy_wait;
-	return false;
+	return jiffy_wait;
 }
 
 /*
@@ -947,8 +934,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 					jiffies + tg->td->throtl_slice);
 	}
 
-	if (tg_within_bps_limit(tg, bio, bps_limit, &bps_wait) &&
-	    tg_within_iops_limit(tg, bio, iops_limit, &iops_wait)) {
+	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
+	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
+	if (bps_wait + iops_wait == 0) {
 		if (wait)
 			*wait = 0;
 		return true;