/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notify the upper layer of the error yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
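
/* The per-request tio lives in the blk-mq per-request driver data (PDU). */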
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
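
/* Account a completed original request in dm-stats, if stats are enabled. */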
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/* nudge anyone waiting on suspend queue */
	if (unlikely(waitqueue_active(&md->wait)))
		wake_up(&md->wait);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}
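
/* Kick the queue's requeue list, optionally after a delay in milliseconds. */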
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}
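
/*
 * Give the original request back for a later retry: drop any prepared clone,
 * finish stats accounting, and put the request on the requeue list, optionally
 * after a short delay.
 */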
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}
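
/*
 * Clone completion: let the target's rq_end_io hook (if any) decide whether
 * the I/O should be finished, requeued or left pending, then act on it.
 */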
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}
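
/*
 * end_io callback of the clone (set up in setup_clone()):
 * propagate the result to the original request.
 */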
static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}
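
/*
 * Hand the prepared clone to the underlying device's request queue.
 * Resource shortages are returned to the caller; any other failure
 * completes the original request with the error.
 */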
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}
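
/*
 * bio_ctr callback for blk_rq_prep_clone(): link each cloned bio back to its
 * original bio and tio so end_clone_bio() can track partial completion.
 */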
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}
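
/* Prepare the clone of the original request and hook up its completion handling. */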
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}
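
/* Reset the per-request tio state before mapping a new original request. */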
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			tio->ti->type->release_clone_rq(clone);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}
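
/*
 * Begin processing an original request: notify blk-mq, start dm-stats
 * accounting, and take an md reference for the in-flight I/O.
 */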
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
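
/*
 * blk-mq .init_request callback: invoked once per preallocated request when
 * the tag set is created, before any I/O is issued on it.
 */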
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}
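
/*
 * blk-mq .queue_rq callback: map the request through the (usually immutable)
 * target and dispatch the resulting clone, or return BLK_STS_RESOURCE so
 * blk-mq retries later if the target is busy or mapping must be requeued.
 */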
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
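
/* Allocate and set up the blk-mq tag set and request queue for a request-based md. */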
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
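
/* Tear down the blk-mq tag set allocated in dm_mq_init_request_queue(). */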
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");