// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "null_blk_trace.h"

/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT		11
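
/*
 * Map a sector to its zone index. dev->zone_size_sects is a power of two
 * (enforced at init time), so the shift by ilog2() is an exact division.
 */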
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
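
/*
 * Validate the zoned device parameters, allocate and initialize the zone
 * array, and set up the zoned properties of the request queue. Returns 0
 * on success, or a negative error code on invalid parameters or allocation
 * failure.
 */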
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
	dev->nr_zones = dev_size >>
			(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;
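
	/*
	 * A zoned device must have at least one sequential write required
	 * zone, so clamp the number of conventional zones if the configured
	 * value would consume the whole device.
	 */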
	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}

	for (i = 0; i < dev->zone_nr_conv; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = zone->wp = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT;
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	q->limits.zoned = BLK_ZONED_HM;
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	return 0;
}
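
/*
 * Finalize zoned setup once the disk exists: revalidate the zones for
 * blk-mq devices (bio-based devices set the chunk sectors and zone count
 * directly) and advertise the zone append and open/active zone limits.
 */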
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	blk_queue_max_open_zones(q, dev->zone_max_open);
	blk_queue_max_active_zones(q, dev->zone_max_active);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
}
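
/*
 * Report zones starting from @sector through the @cb callback. Returns the
 * number of zones reported, which may be less than @nr_zones if the end of
 * the device is reached, or a negative error code from the callback.
 */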
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct blk_zone zone;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	for (i = 0; i < nr_zones; i++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		memcpy(&zone, &dev->zones[first_zone + i],
		       sizeof(struct blk_zone));
		error = cb(&zone, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}
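
/*
 * Clamp the length of a read so that it does not cross the write pointer of
 * its target zone: sectors at and beyond the write pointer have never been
 * written and must not be returned. Conventional zones have no write pointer
 * and are always fully readable.
 */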
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
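
/*
 * Close a zone: an open zone with data becomes closed, while an open zone
 * whose write pointer is still at the zone start goes back to empty.
 * Closing an already closed zone is a no-op; closing a conventional, empty
 * or full zone is an error. The open/closed zone counters are updated to
 * match the transition.
 */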
static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}
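
/*
 * Free up an open zone resource by closing the lowest-numbered implicitly
 * open sequential zone, if any.
 */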
static void null_close_first_imp_zone(struct nullb_device *dev)
{
	unsigned int i;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
			null_close_zone(dev, &dev->zones[i]);
			return;
		}
	}
}
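
/*
 * Check that activating one more zone (open or closed) would not exceed the
 * configured max active zones limit. A limit of 0 means unlimited.
 */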
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}
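
/*
 * Check that opening one more zone would not exceed the configured max open
 * zones limit. If the limit is reached but an implicitly open zone exists
 * and the active limit still has room, that zone is closed to make an open
 * zone resource available. A limit of 0 means unlimited.
 */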
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_first_imp_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC standard,
 * with the addition of max active zones support (added in the ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or explicit open,
 * while maintaining the max open zone (and max active zone) limit(s). It may close an
 * implicit open zone in order to make additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
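
/*
 * Handle a regular or zone append write: check the target zone condition
 * and the zone resource limits, advance the write pointer, and track the
 * implicit open, explicit open and closed zone counts as the zone
 * transitions.
 */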
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		return BLK_STS_IOERR;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	default:
		/* Invalid zone condition */
		return BLK_STS_IOERR;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (cmd->bio)
			cmd->bio->bi_iter.bi_sector = sector;
		else
			cmd->rq->__sector = sector;
	} else if (sector != zone->wp) {
		return BLK_STS_IOERR;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity)
		return BLK_STS_IOERR;

	if (zone->cond == BLK_ZONE_COND_CLOSED) {
		dev->nr_zones_closed--;
		dev->nr_zones_imp_open++;
	} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
		dev->nr_zones_imp_open++;
	}
	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
		zone->cond = BLK_ZONE_COND_IMP_OPEN;

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		return ret;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
	}

	return BLK_STS_OK;
}
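
/*
 * Explicitly open a zone. Opening an already explicitly open zone is a
 * no-op; empty and closed zones must first pass the zone resource checks.
 * Conventional and full zones cannot be opened.
 */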
static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

	return BLK_STS_OK;
}
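
/*
 * Finish a zone: transition it to full and move the write pointer to the
 * end of the zone, releasing any open or active zone resource it held.
 * Finishing a full zone is a no-op; conventional zones cannot be finished.
 */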
static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}
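
/*
 * Reset a zone: return it to the empty condition with the write pointer at
 * the zone start, releasing any open or active zone resource it held.
 * Resetting an empty zone is a no-op; conventional zones cannot be reset.
 */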
static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	return BLK_STS_OK;
}
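
/*
 * Dispatch a zone management operation (reset, reset all, open, close,
 * finish) to the matching helper. Reset all walks every sequential zone;
 * the per-zone helpers report invalid transitions as BLK_STS_IOERR.
 */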
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zone_no];
	blk_status_t ret = BLK_STS_OK;
	size_t i;

	switch (op) {
	case REQ_OP_ZONE_RESET_ALL:
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++)
			null_reset_zone(dev, &dev->zones[i]);
		break;
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		return BLK_STS_NOTSUPP;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

	return ret;
}
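
/*
 * Entry point for zoned command processing: regular writes and zone append
 * go through the write pointer handling, zone management operations are
 * dispatched above, and everything else, such as reads, is handled by the
 * regular null_blk command path.
 */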
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors)
{
	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		return null_process_cmd(cmd, op, sector, nr_sectors);
	}
}