/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
# include "blk-mq-debugfs.h"
struct blk_mq_debugfs_attr ;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_CGROUP,
};

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;	/* current number of inflight requests */
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;	/* singly linked list headed at q->rq_qos */
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
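
/*
 * A policy typically embeds a struct rq_qos in its own state and recovers
 * that state in each hook with container_of().  A minimal sketch, assuming
 * a hypothetical policy (struct my_qos, my_qos_ops and my_qos_done are
 * illustrative, not part of this API):
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *		atomic_t tracked;
 *	};
 *
 *	static void my_qos_done(struct rq_qos *rqos, struct request *rq)
 *	{
 *		struct my_qos *mq = container_of(rqos, struct my_qos, rqos);
 *
 *		atomic_dec(&mq->tracked);
 *	}
 *
 *	static struct rq_qos_ops my_qos_ops = {
 *		.done = my_qos_done,
 *	};
 *
 * Registration fills in the embedded rq_qos and links it with rq_qos_add():
 *
 *	mq->rqos.id = RQ_QOS_CGROUP;
 *	mq->rqos.ops = &my_qos_ops;
 *	mq->rqos.q = q;
 *	rq_qos_add(q, &mq->rqos);
 */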

struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_CGROUP);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_CGROUP:
		return "cgroup";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

/*
 * Policies are linked at the head of q->rq_qos, so the one added last is
 * the first to see each hook.
 */
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;

	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				q->rq_qos = rqos->next;
			break;
		}
		prev = cur;
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
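
/*
 * rq_qos_wait() sleeps until acquire_inflight_cb returns true; when it
 * does, the callback must also have taken an inflight slot
 * (rq_wait_inc_below() does both atomically).  cleanup_cb gives a slot
 * back when the waiter raced with a wakeup and ended up holding two.
 * A minimal sketch, assuming a hypothetical policy whose limit is passed
 * through private_data (my_acquire_inflight and my_cleanup are
 * illustrative only):
 *
 *	static bool my_acquire_inflight(struct rq_wait *rqw, void *private_data)
 *	{
 *		unsigned int *limit = private_data;
 *
 *		return rq_wait_inc_below(rqw, *limit);
 *	}
 *
 *	static void my_cleanup(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		wake_up_all(&rqw->wait);
 *	}
 *
 *	rq_qos_wait(rqw, &limit, my_acquire_inflight, my_cleanup);
 */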

void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
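
/*
 * A minimal sketch of how a policy might drive the depth scaling helpers,
 * mirroring the pattern writeback throttling uses (the latency verdict
 * itself is the policy's business; latency_exceeded is illustrative):
 *
 *	if (latency_exceeded)
 *		rq_depth_scale_down(&rqd, true);
 *	else
 *		rq_depth_scale_up(&rqd);
 */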

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
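
/*
 * The wrappers below are the entry points blk-mq uses over a request's
 * life: throttle runs at bio submission time and may sleep; track
 * associates a bio with the request it is attached to; issue, requeue and
 * done follow the request through dispatch, requeue and completion;
 * done_bio fires when the bio itself completes; cleanup lets a policy drop
 * per-bio state when the bio never got a request.
 */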

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

void rq_qos_exit(struct request_queue *);

#endif