// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}
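
/*
 * Zone sizes are validated to be a power of two in null_init_zoned_dev(), so
 * the zone index is a plain shift. Worked example (illustrative values): with
 * a 4 MB zone size, zone_size_sects is (4 * SZ_1M) >> 9 = 8192 = 1 << 13
 * sectors, and sector 20000 maps to zone 20000 >> 13 = 2.
 */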
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
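
/*
 * The zone resource lock protects the open/active zone accounting. It is only
 * taken when a zone_max_open or zone_max_active limit is configured (i.e.
 * dev->need_zone_res_mgmt is set), so unlimited devices pay no locking cost.
 */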
static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}
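
/*
 * Initialize the zone configuration of the device: validate the zone size and
 * capacity, compute the zone geometry in sectors, and populate the zone array
 * (dev->zone_nr_conv conventional zones first, sequential zones after them).
 */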
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < nbr of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;
		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;
		sector += dev->zone_size_sects;
	}

	return 0;
}
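
/*
 * Register the zoned properties of the device with the block layer. For
 * blk-mq devices the block layer revalidates the zones itself; for BIO-based
 * devices the chunk sectors and zone count are set directly here.
 */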
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	blk_queue_max_open_zones(q, dev->zone_max_open);
	blk_queue_max_active_zones(q, dev->zone_max_active);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
}
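
/*
 * Report zones to the block layer (the gendisk ->report_zones() method).
 * Returns the number of zones reported, or a negative error code from the
 * report callback.
 */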
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
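
/*
 * Worked example (illustrative numbers): with the write pointer at sector 8,
 * a read of sectors 4..11 (len == 4096 bytes) is truncated to
 * (8 - 4) << 9 = 2048 valid bytes, while a read starting at or beyond
 * sector 8 returns 0.
 */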

static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}
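
/*
 * null_check_active() and null_check_open() verify that a zone can be
 * activated or opened under the configured limits, returning
 * BLK_STS_ZONE_ACTIVE_RESOURCE or BLK_STS_ZONE_OPEN_RESOURCE when a limit
 * would be exceeded.
 */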
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC standard,
 * with the addition of max active zones support (added in the ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or explicit open,
 * while maintaining the max open zone (and max active zone) limit(s). It may close an
 * implicit open zone in order to make additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
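
/*
 * Handle both regular writes and zone append. For sequential zones, the
 * write must land on the write pointer (zone append is redirected there),
 * the zone is transitioned to implicitly open if needed, and it becomes full
 * once the write pointer reaches the zone capacity.
 */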
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL) {
		/* Cannot write to a full zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (cmd->bio)
			cmd->bio->bi_iter.bi_sector = sector;
		else
			cmd->rq->__sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}
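
/*
 * Zone management operations (reset, open, close, finish). For
 * REQ_OP_ZONE_RESET_ALL, every sequential zone that is not already empty is
 * reset; conventional zones are never touched.
 */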
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];

			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

	null_unlock_zone(dev, zone);

	return ret;
}
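
/*
 * Entry point for zoned command processing: writes and zone append go through
 * null_zone_write(), zone management operations through null_zone_mgmt(), and
 * everything else (e.g. reads) is processed under the target zone lock.
 */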
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}
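
/*
 * Usage sketch (not part of the driver): a zoned null_blk instance can be
 * created with module parameters, e.g.:
 *
 *   modprobe null_blk zoned=1 zone_size=256 zone_nr_conv=4 \
 *           zone_max_open=8 zone_max_active=12
 *
 * Parameter names here are assumptions based on this file's configuration
 * fields; see Documentation/block/null_blk.rst for the authoritative list.
 */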