// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
				     by the above parameters. For throughput. */
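
/*
 * Data directions; DD_READ and DD_WRITE index both sort_list[] and
 * fifo_list[] in struct dd_per_prio.
 */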
enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };
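
/*
 * Scheduling priority levels. I/O priority classes are mapped onto these
 * three levels by ioprio_class_to_prio[] below.
 */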
enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
	uint32_t inserted;
	uint32_t merged;
	uint32_t dispatched;
	atomic_t completed;
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Position of the most recently dispatched request. */
	sector_t latest_pos[DD_DIR_COUNT];
	struct io_stats_per_prio stats;
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int prio_aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};
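
/*
 * Return the sector-sorted rbtree that holds @rq, based on the request's
 * data direction.
 */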
static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request before `rq' in sector-sorted order
 */
static inline struct request *
deadline_earlier_request(struct request *rq)
{
	struct rb_node *node = rb_prev(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
 * return the first request after the start of the zone containing @pos.
 */
static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
				enum dd_data_dir data_dir, sector_t pos)
{
	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
	struct request *rq, *res = NULL;

	if (!node)
		return NULL;

	rq = rb_entry_rq(node);
	/*
	 * A zoned write may have been requeued with a starting position that
	 * is below that of the most recently dispatched request. Hence, for
	 * zoned writes, start searching from the start of a zone.
	 */
	if (blk_rq_is_seq_zoned_write(rq))
		pos -= round_down(pos, rq->q->limits.chunk_sectors);

	while (node) {
		rq = rb_entry_rq(node);
		if (blk_rq_pos(rq) >= pos) {
			res = rq;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return res;
}
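
/*
 * Add @rq to / remove @rq from the sector-sorted rbtree of its priority
 * level and data direction.
 */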
static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}
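
/*
 * Callback invoked after a bio has been merged into @req. A front merge
 * changes the request's start sector, so reposition it in the rbtree.
 */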
static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	lockdep_assert_held(&dd->lock);

	dd->per_prio[prio].stats.merged++;

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->inserted - atomic_read(&stats->completed);
}

/*
 * deadline_check_fifo returns true if and only if there are expired requests
 * in the FIFO list. Requires !list_empty(&dd->fifo_list[data_dir]).
 */
static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
				       enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
}

/*
 * Check if rq has a sequential request preceding it.
 */
static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
{
	struct request *prev = deadline_earlier_request(rq);

	if (!prev)
		return false;

	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
}

/*
 * Skip all write requests that are sequential from @rq, even if we cross
 * a zone boundary.
 */
static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
						struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);

	do {
		pos += blk_rq_sectors(rq);
		rq = deadline_latter_request(rq);
	} while (rq && blk_rq_pos(rq) == pos);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq, *rb_rq, *next;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry_safe(rq, next, &per_prio->fifo_list[DD_WRITE],
				 queuelist) {
		/* Check whether a prior request exists for the same zone. */
		rb_rq = deadline_from_pos(per_prio, data_dir, blk_rq_pos(rq));
		if (rb_rq && blk_rq_pos(rb_rq) < blk_rq_pos(rq))
			rq = rb_rq;
		if (blk_req_can_dispatch_to_zone(rq) &&
		    (blk_queue_nonrot(rq->q) ||
		     !deadline_is_seq_write(dd, rq)))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = deadline_from_pos(per_prio, data_dir,
			       per_prio->latest_pos[data_dir]);
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		if (blk_queue_nonrot(rq->q))
			rq = deadline_latter_request(rq);
		else
			rq = deadline_skip_seq_writes(dd, rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * Returns true if and only if @rq started after @latest_start where
 * @latest_start is in jiffies.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
			  unsigned long latest_start)
{
	unsigned long start_time = (unsigned long)rq->fifo_time;

	start_time -= dd->fifo_expire[rq_data_dir(rq)];

	return time_after(start_time, latest_start);
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     unsigned long latest_start)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (started_after(dd, rq, latest_start))
			return NULL;
		list_del_init(&rq->queuelist);
		data_dir = rq_data_dir(rq);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch) {
		/* we have a next request and are still entitled to batch */
		data_dir = rq_data_dir(rq);
		goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (started_after(dd, rq, latest_start))
		return NULL;

	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
	dd->per_prio[prio].stats.dispatched++;
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
						      unsigned long now)
{
	struct request *rq;
	enum dd_prio prio;
	int prio_cnt;

	lockdep_assert_held(&dd->lock);

	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
		   !!dd_queued(dd, DD_IDLE_PRIO);
	if (prio_cnt < 2)
		return NULL;

	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
					   now - dd->prio_aging_expire);
		if (rq)
			return rq;
	}

	return NULL;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const unsigned long now = jiffies;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	rq = dd_dispatch_prio_aged_requests(dd, now);
	if (rq)
		goto unlock;

	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
}

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}
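
/*
 * Called when the elevator is switched or torn down: warn about requests
 * still owned by the scheduler and free the per-queue private data.
 */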
static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];
		const struct io_stats_per_prio *stats = &per_prio->stats;
		uint32_t queued;

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

		spin_lock(&dd->lock);
		queued = dd_queued(dd, prio);
		spin_unlock(&dd->lock);

		WARN_ONCE(queued != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, stats->inserted, stats->merged,
			  stats->dispatched, atomic_read(&stats->completed));
	}

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}

	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->prio_aging_expire = prio_aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	/* We dispatch from request queue wide instead of hw queue */
	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);

	q->elevator = eq;
	return 0;

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      blk_insert_t flags, struct list_head *free)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	per_prio = &dd->per_prio[prio];
	if (!rq->elv.priv[0]) {
		per_prio->stats.inserted++;
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq, free))
		return;

	trace_block_rq_insert(rq);

	if (flags & BLK_MQ_INSERT_AT_HEAD) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		struct list_head *insert_before;

		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		insert_before = &per_prio->fifo_list[data_dir];
#ifdef CONFIG_BLK_DEV_ZONED
		/*
		 * Insert zoned writes such that requests are sorted by
		 * position per zone.
		 */
		if (blk_rq_is_seq_zoned_write(rq)) {
			struct request *rq2 = deadline_latter_request(rq);

			if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
				insert_before = &rq2->queuelist;
		}
#endif
		list_add_tail(&rq->queuelist, insert_before);
	}
}

/*
 * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list,
			       blk_insert_t flags)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	LIST_HEAD(free);

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, flags, &free);
	}
	spin_unlock(&dd->lock);

	blk_mq_free_requests(&free);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}
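
/* Returns true if any per-priority write FIFO list is non-empty. */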
static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio p;

	for (p = 0; p <= DD_PRIO_MAX; p++)
		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
			return true;

	return false;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	atomic_inc(&per_prio->stats.completed);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);

		if (dd_has_write_work(rq->mq_hctx))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	}
}
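
/*
 * Returns true if requests are pending for a single priority level, either
 * on its dispatch list or on one of its FIFO lists.
 */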
static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES
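
/*
 * Parse an integer from a sysfs store, clamp it to [MIN, MAX], optionally
 * convert it with __CONV and write the result through __PTR.
 */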
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(prio_aging_expire),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq;						\
									\
	rq = deadline_from_pos(per_prio, data_dir,			\
			       per_prio->latest_pos[data_dir]);		\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
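
/* debugfs attributes that expose the scheduler's run-time state. */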
static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_queued(dd, DD_RT_PRIO);
	be = dd_queued(dd, DD_BE_PRIO);
	idle = dd_queued(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->dispatched + stats->merged -
		atomic_read(&stats->completed);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
	be = dd_owned_by_driver(dd, DD_BE_PRIO);
	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");