/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
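
/*
 * Default for whether newly created request-based DM devices use blk-mq
 * (dm-mq) or the legacy .request_fn path; see dm_use_blk_mq_default()
 * below and the use_blk_mq module parameter at the bottom of this file.
 */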
static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

bool dm_use_blk_mq_default(void)
{
	return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);
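
/*
 * The getters below clamp the module parameters via __dm_get_module_param():
 * a runtime write of 0 falls back to the default and an oversized value is
 * capped at the stated maximum.
 */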
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
int dm_request_based(struct mapped_device *md)
{
	return queue_is_rq_based(md->queue);
}
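
/*
 * Queue start/stop: dm_start_queue() and dm_stop_queue() dispatch to the
 * legacy (.request_fn) or blk-mq variant depending on q->mq_ops.
 */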
static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_start_queue(q);
	else
		dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_stop_queue(q);
	else
		dm_mq_stop_queue(q);
}
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
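
/*
 * The dm_rq_target_io is allocated as the request's pdu on both paths:
 * blk-mq does this natively and the old .request_fn path does it via
 * q->cmd_size, so a single blk_mq_rq_to_pdu() based helper covers both.
 */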
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!q->mq_ops && run_queue) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}
/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}
/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_delay_queue(q, delay_ms);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
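
/*
 * blk-mq requeue path: requests are parked on the queue's requeue list and
 * the list is kicked, optionally after a delay in msecs, to re-dispatch them.
 */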
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}
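
/*
 * Undo a mapping attempt: release the clone (if one was set up) and put the
 * original request back on its queue, optionally after a 100ms delay.
 */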
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	int rw = rq_data_dir(rq);
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	if (!rq->q->mq_ops)
		dm_old_requeue_request(rq, delay_ms);
	else
		dm_mq_delay_requeue_request(rq, delay_ms);

	rq_completed(md, rw, false);
}
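
/*
 * Let the target's rq_end_io() (if any) decide how to finish the clone:
 * complete it, requeue the original request, or keep handling the I/O
 * itself (DM_ENDIO_INCOMPLETE).
 */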
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops)
			blk_end_request_all(rq, tio->error);
		else
			blk_mq_end_request(rq, tio->error);
		rq_completed(md, rw, false);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq);
}
/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}
/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}
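
/*
 * Insert the fully prepared clone into the underlying device's queue.  If
 * blk_insert_cloned_request() fails, the error is propagated to the original
 * request via dm_complete_request().
 */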
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time = jiffies;
	r = blk_insert_cloned_request(clone->q, clone);
	if (r)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
}
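
/*
 * Clone construction: blk_rq_prep_clone() duplicates the original request's
 * bios using dm_rq_bio_constructor(), which wires each cloned bio back to
 * the tio so end_clone_bio() can account partial completions.
 */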
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}
static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		kthread_init_work(&tio->work, map_tio_request);
}
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
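
/*
 * Mark the original request in-flight: start it at the block layer, bump
 * md->pending, record stats, and take an md reference that is dropped in
 * rq_completed().
 */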
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	return __dm_rq_init_rq(q->rq_alloc_data, rq);
}
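
/*
 * Worker callback for the legacy .request_fn path: dm_old_request_fn() queues
 * one of these per request on md->kworker, so the actual clone-and-map runs
 * in process context rather than under the queue lock.
 */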
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}
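
/*
 * sysfs show/store for md->seq_rq_merge_deadline_usecs, used only by the old
 * .request_fn path: a non-zero deadline makes dm_old_request_fn() briefly
 * hold back a request that looks sequential with the last one, giving the
 * elevator a chance to merge them.  Capped at MAX_SEQ_RQ_MERGE_DEADLINE_USECS.
 */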
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}

static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}
	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);
		md->kworker_task = NULL;
		return error;
	}

	return 0;
}
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	return __dm_rq_init_rq(set->driver_data, rq);
}
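
/*
 * blk-mq .queue_rq handler: the request is mapped directly in this context,
 * with no kworker involved.  Returning BLK_STS_RESOURCE asks blk-mq to retry
 * later; on a failed mapping the hardware queue is explicitly re-run after
 * 100ms.
 */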
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
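
/*
 * Allocate and initialize a blk-mq tag set and queue for a dm-mq device.
 * dm-mq can only be layered on top of blk-mq devices, so the table is
 * checked with dm_table_all_blk_mq_devices() first.
 */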
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}
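
/*
 * Module parameters.  dm-rq is built into dm-mod, so in a standard build
 * these are typically tuned as dm_mod.* on the kernel command line or
 * (assuming the usual module layout) under /sys/module/dm_mod/parameters/,
 * e.g.:
 *
 *	echo 4 > /sys/module/dm_mod/parameters/dm_mq_nr_hw_queues
 */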
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");