// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "null_blk_trace.h"

/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT		11

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
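
/*
 * Worked example (illustration only): 1 MB is 2^20 bytes, i.e. 2^11
 * 512-byte sectors, hence the shift by 11. With a zone_size of 256 MB,
 * zone_size_sects is 256 << 11 = 524288 sectors and null_zone_no()
 * reduces to sect >> 19.
 */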

int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
	dev->nr_zones = dev_size >>
				(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	/*
	 * With memory backing, the zone_lock spinlock needs to be temporarily
	 * released to avoid scheduling in atomic context. To guarantee zone
	 * information protection, use a bitmap to lock zones with
	 * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
	 * implies that the queue is marked with BLK_MQ_F_BLOCKING.
	 */
	spin_lock_init(&dev->zone_lock);
	if (dev->memory_backed) {
		dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
		if (!dev->zone_locks) {
			kvfree(dev->zones);
			return -ENOMEM;
		}
	}

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < nbr of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
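
	/*
	 * Illustrative example (not from the driver): a device created with
	 * the standard module parameters
	 *   modprobe null_blk zoned=1 gb=1 zone_size=64 zone_nr_conv=4 \
	 *           zone_max_open=4 zone_max_active=8
	 * has 16 zones, 12 of them sequential, so both limits are below the
	 * sequential zone count and are kept as configured.
	 */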

	for (i = 0; i < dev->zone_nr_conv; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = zone->wp = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT;
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	q->limits.zoned = BLK_ZONED_HM;
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	return 0;
}
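
/*
 * Called when the device's gendisk is registered. For blk-mq devices,
 * blk_revalidate_disk_zones() checks the zone configuration and updates
 * q->nr_zones; BIO-based devices must set the zone chunk size and zone
 * count by hand.
 */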
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	blk_queue_max_open_zones(q, dev->zone_max_open);
	blk_queue_max_active_zones(q, dev->zone_max_active);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	bitmap_free(dev->zone_locks);
	kvfree(dev->zones);
}
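
/*
 * Zone locking is two-level (see the comment in null_init_zoned_dev()):
 * the zone_lock spinlock serializes all zone accesses, while with memory
 * backing a per-zone bit in dev->zone_locks additionally keeps the zone
 * locked across the points where the spinlock must be dropped to allocate
 * backing pages.
 */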
static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
{
	if (dev->memory_backed)
		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
	spin_lock_irq(&dev->zone_lock);
}

static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
{
	spin_unlock_irq(&dev->zone_lock);

	if (dev->memory_backed)
		clear_and_wake_up_bit(zno, dev->zone_locks);
}

int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i, zno;
	struct blk_zone zone;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	zno = first_zone;
	for (i = 0; i < nr_zones; i++, zno++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zno);
		memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
		null_unlock_zone(dev, zno);

		error = cb(&zone, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
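
/*
 * Illustration (not part of the driver): if a sequential zone's write
 * pointer sits 100 sectors into the zone and a read covers zone-relative
 * sectors 96..103, only the first four sectors hold written data, so the
 * function returns 4 << SECTOR_SHIFT = 2048 valid bytes.
 */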

static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}

static void null_close_first_imp_zone(struct nullb_device *dev)
{
	unsigned int i;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
			null_close_zone(dev, &dev->zones[i]);
			return;
		}
	}
}

static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_first_imp_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC standard,
 * with the addition of max active zones support (added in the ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or explicit open,
 * while maintaining the max open zone (and max active zone) limit(s). It may close an
 * implicit open zone in order to make additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
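
/*
 * Worked example of the checks above (illustration only): with
 * zone_max_open=2 and zone_max_active=3, two implicitly open zones plus
 * one closed zone already use all three active resources, so writing to
 * an EMPTY zone fails with BLK_STS_ZONE_ACTIVE_RESOURCE even though
 * closing an implicit open zone would satisfy the open limit.
 */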

static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	null_lock_zone(dev, zno);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	default:
		/* Invalid zone condition */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (cmd->bio)
			cmd->bio->bi_iter.bi_sector = sector;
		else
			cmd->rq->__sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED) {
		dev->nr_zones_closed--;
		dev->nr_zones_imp_open++;
	} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
		dev->nr_zones_imp_open++;
	}

	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
		zone->cond = BLK_ZONE_COND_IMP_OPEN;

	/*
	 * Memory backing allocation may sleep: release the zone_lock spinlock
	 * to avoid scheduling in atomic context. Zone operation atomicity is
	 * still guaranteed through the zone_locks bitmap.
	 */
	if (dev->memory_backed)
		spin_unlock_irq(&dev->zone_lock);
	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (dev->memory_backed)
		spin_lock_irq(&dev->zone_lock);

	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zno);

	return ret;
}
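
/*
 * Note on zone append (REQ_OP_ZONE_APPEND): because the actual write
 * position is chosen under the zone lock and returned through the BIO or
 * request sector, concurrent appends to the same zone can all succeed,
 * each landing at the write pointer current at dispatch time.
 */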

static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

	return BLK_STS_OK;
}

static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}

static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	return BLK_STS_OK;
}
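
/*
 * Summary of the zone condition transitions implemented above
 * (conventional zones stay in NOT_WP):
 *   EMPTY/CLOSED      -> IMP_OPEN/EXP_OPEN  (write / explicit open)
 *   IMP_OPEN/EXP_OPEN -> CLOSED             (close with data written)
 *   IMP_OPEN/EXP_OPEN -> FULL               (write up to capacity)
 *   any sequential    -> FULL               (finish)
 *   any sequential    -> EMPTY              (reset)
 */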

static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct blk_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			null_lock_zone(dev, i);
			zone = &dev->zones[i];
			if (zone->cond != BLK_ZONE_COND_EMPTY) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, i);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone_no);

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

	null_unlock_zone(dev, zone_no);

	return ret;
}

blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		sts = null_zone_write(cmd, sector, nr_sectors, false);
		break;
	case REQ_OP_ZONE_APPEND:
		sts = null_zone_write(cmd, sector, nr_sectors, true);
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		sts = null_zone_mgmt(cmd, op, sector);
		break;
	default:
		null_lock_zone(dev, zno);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zno);
	}

	return sts;
}
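
/*
 * Example exercise of these paths from user space, assuming a zoned
 * instance at /dev/nullb0 and util-linux's blkzone tool:
 *   blkzone report /dev/nullb0  - serviced by null_report_zones()
 *   blkzone reset /dev/nullb0   - zone reset(s), serviced by null_zone_mgmt()
 *   dd if=/dev/zero of=/dev/nullb0 bs=4096 count=1 oflag=direct
 *       - with zone_nr_conv=0, a write at the write pointer of zone 0,
 *         serviced by null_zone_write()
 */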