/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;
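
/* The rq_qos policies that can be attached to a request_queue. */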
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
	RQ_QOS_IOPRIO,
};
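
/*
 * Wait state for policies that cap the number of in-flight requests:
 * waiters sleep on @wait until @inflight drops below the policy's limit
 * (see rq_qos_wait() and rq_wait_inc_below()).
 */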
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};
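
/*
 * One node in the singly linked list of policies hanging off q->rq_qos;
 * each policy embeds this and supplies its callbacks through @ops.
 */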
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};
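
/*
 * Hooks a policy may implement; they are called from the bio submission,
 * merge, request issue/requeue and completion paths via the rq_qos_*()
 * wrappers below.
 */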
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
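
/*
 * Queue-depth scaling state used by policies (such as wbt) that grow or
 * shrink the number of requests they allow in flight.
 */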
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};
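
/* Find the policy with the given id on @q, or NULL if it is not attached. */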
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze queue, which
	 * is fine since we only support rq_qos for blk-mq queue.
	 *
	 * Reuse ->queue_lock for protecting against other concurrent
	 * rq_qos adding/deleting
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}
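
/*
 * A minimal sketch of how a policy attaches itself; the foo_* names are
 * purely illustrative:
 *
 *	static struct rq_qos_ops foo_rqos_ops = {
 *		.throttle	= foo_throttle,
 *		.done		= foo_done,
 *		.exit		= foo_exit,
 *	};
 *
 *	foo->rqos.id = RQ_QOS_WBT;	/* one of enum rq_qos_id *\/
 *	foo->rqos.ops = &foo_rqos_ops;
 *	foo->rqos.q = q;
 *	rq_qos_add(q, &foo->rqos);
 */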

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	blk_mq_debugfs_unregister_rqos(rqos);
}
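
/*
 * @acquire_inflight_cb tries to take an in-flight slot for the caller and
 * returns true on success; rq_qos_wait() sleeps on @rqw until a slot is
 * obtained.  @cleanup_cb releases a slot that was handed over redundantly
 * when a waiter races with the wakeup path.
 */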
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}
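
/*
 * Only bios marked by rq_qos_throttle() or rq_qos_merge() are reported to
 * the policies on completion; everything else skips the indirect call.
 */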
static inline void rq_qos_done_bio(struct bio *bio)
{
	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}
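
/*
 * Called at submission time.  BIO_QOS_THROTTLED tells rq_qos_done_bio()
 * that the policies must be notified when this bio completes.
 */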
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif /* RQ_QOS_H */