// SPDX-License-Identifier: GPL-2.0
# include <linux/vmalloc.h>
# include "null_blk.h"
/* zone_size in MBs to sectors. */
# define ZONE_SIZE_SHIFT 11
/* Return the index of the zone containing sector @sect. */
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	unsigned int zone_shift = ilog2(dev->zone_size_sects);

	return sect >> zone_shift;
}
int null_zone_init ( struct nullb_device * dev )
{
sector_t dev_size = ( sector_t ) dev - > size * 1024 * 1024 ;
sector_t sector = 0 ;
unsigned int i ;
if ( ! is_power_of_2 ( dev - > zone_size ) ) {
2019-09-16 11:07:59 -03:00
pr_err ( " zone_size must be power-of-two \n " ) ;
2018-07-06 19:38:39 +02:00
return - EINVAL ;
}
dev - > zone_size_sects = dev - > zone_size < < ZONE_SIZE_SHIFT ;
dev - > nr_zones = dev_size > >
( SECTOR_SHIFT + ilog2 ( dev - > zone_size_sects ) ) ;
dev - > zones = kvmalloc_array ( dev - > nr_zones , sizeof ( struct blk_zone ) ,
GFP_KERNEL | __GFP_ZERO ) ;
if ( ! dev - > zones )
return - ENOMEM ;
2018-10-30 16:14:05 +09:00
if ( dev - > zone_nr_conv > = dev - > nr_zones ) {
dev - > zone_nr_conv = dev - > nr_zones - 1 ;
2019-09-16 11:07:59 -03:00
pr_info ( " changed the number of conventional zones to %u " ,
2018-10-30 16:14:05 +09:00
dev - > zone_nr_conv ) ;
}
for ( i = 0 ; i < dev - > zone_nr_conv ; i + + ) {
struct blk_zone * zone = & dev - > zones [ i ] ;
zone - > start = sector ;
zone - > len = dev - > zone_size_sects ;
zone - > wp = zone - > start + zone - > len ;
zone - > type = BLK_ZONE_TYPE_CONVENTIONAL ;
zone - > cond = BLK_ZONE_COND_NOT_WP ;
sector + = dev - > zone_size_sects ;
}
for ( i = dev - > zone_nr_conv ; i < dev - > nr_zones ; i + + ) {
2018-07-06 19:38:39 +02:00
struct blk_zone * zone = & dev - > zones [ i ] ;
zone - > start = zone - > wp = sector ;
zone - > len = dev - > zone_size_sects ;
zone - > type = BLK_ZONE_TYPE_SEQWRITE_REQ ;
zone - > cond = BLK_ZONE_COND_EMPTY ;
sector + = dev - > zone_size_sects ;
}
return 0 ;
}
void null_zone_exit ( struct nullb_device * dev )
{
kvfree ( dev - > zones ) ;
}
2019-11-11 11:39:27 +09:00
int null_report_zones ( struct gendisk * disk , sector_t sector ,
2019-11-11 11:39:30 +09:00
unsigned int nr_zones , report_zones_cb cb , void * data )
2018-07-06 19:38:39 +02:00
{
2018-10-12 19:08:49 +09:00
struct nullb * nullb = disk - > private_data ;
struct nullb_device * dev = nullb - > dev ;
2019-11-11 11:39:30 +09:00
unsigned int first_zone , i ;
struct blk_zone zone ;
int error ;
2018-07-06 19:38:39 +02:00
2019-11-11 11:39:30 +09:00
first_zone = null_zone_no ( dev , sector ) ;
if ( first_zone > = dev - > nr_zones )
return 0 ;
2018-07-06 19:38:39 +02:00
2019-11-11 11:39:30 +09:00
nr_zones = min ( nr_zones , dev - > nr_zones - first_zone ) ;
for ( i = 0 ; i < nr_zones ; i + + ) {
/*
* Stacked DM target drivers will remap the zone information by
* modifying the zone information passed to the report callback .
* So use a local copy to avoid corruption of the device zone
* array .
*/
memcpy ( & zone , & dev - > zones [ first_zone + i ] ,
sizeof ( struct blk_zone ) ) ;
error = cb ( & zone , i , data ) ;
if ( error )
return error ;
}
2018-07-06 19:38:39 +02:00
2019-11-11 11:39:30 +09:00
return nr_zones ;
2018-07-06 19:38:39 +02:00
}
2019-10-17 14:19:43 -07:00
size_t null_zone_valid_read_len ( struct nullb * nullb ,
sector_t sector , unsigned int len )
{
struct nullb_device * dev = nullb - > dev ;
struct blk_zone * zone = & dev - > zones [ null_zone_no ( dev , sector ) ] ;
unsigned int nr_sectors = len > > SECTOR_SHIFT ;
/* Read must be below the write pointer position */
if ( zone - > type = = BLK_ZONE_TYPE_CONVENTIONAL | |
sector + nr_sectors < = zone - > wp )
return len ;
if ( sector > zone - > wp )
return 0 ;
return ( zone - > wp - sector ) < < SECTOR_SHIFT ;
}
2019-08-22 21:45:18 -07:00
static blk_status_t null_zone_write ( struct nullb_cmd * cmd , sector_t sector ,
2018-09-12 18:21:11 -06:00
unsigned int nr_sectors )
2018-07-06 19:38:39 +02:00
{
struct nullb_device * dev = cmd - > nq - > dev ;
unsigned int zno = null_zone_no ( dev , sector ) ;
struct blk_zone * zone = & dev - > zones [ zno ] ;
switch ( zone - > cond ) {
case BLK_ZONE_COND_FULL :
/* Cannot write to a full zone */
cmd - > error = BLK_STS_IOERR ;
2019-08-22 21:45:18 -07:00
return BLK_STS_IOERR ;
2018-07-06 19:38:39 +02:00
case BLK_ZONE_COND_EMPTY :
case BLK_ZONE_COND_IMP_OPEN :
2020-01-09 14:03:55 +09:00
case BLK_ZONE_COND_EXP_OPEN :
case BLK_ZONE_COND_CLOSED :
2018-07-06 19:38:39 +02:00
/* Writes must be at the write pointer position */
2019-08-22 21:45:18 -07:00
if ( sector ! = zone - > wp )
return BLK_STS_IOERR ;
2018-07-06 19:38:39 +02:00
2020-01-09 14:03:55 +09:00
if ( zone - > cond ! = BLK_ZONE_COND_EXP_OPEN )
2018-07-06 19:38:39 +02:00
zone - > cond = BLK_ZONE_COND_IMP_OPEN ;
2018-09-12 18:21:11 -06:00
zone - > wp + = nr_sectors ;
2018-07-06 19:38:39 +02:00
if ( zone - > wp = = zone - > start + zone - > len )
zone - > cond = BLK_ZONE_COND_FULL ;
break ;
2018-10-30 16:14:05 +09:00
case BLK_ZONE_COND_NOT_WP :
break ;
2018-07-06 19:38:39 +02:00
default :
/* Invalid zone condition */
2019-08-22 21:45:18 -07:00
return BLK_STS_IOERR ;
2018-07-06 19:38:39 +02:00
}
2019-08-22 21:45:18 -07:00
return BLK_STS_OK ;
2018-07-06 19:38:39 +02:00
}
2019-10-27 23:05:49 +09:00
static blk_status_t null_zone_mgmt ( struct nullb_cmd * cmd , enum req_opf op ,
sector_t sector )
2018-07-06 19:38:39 +02:00
{
struct nullb_device * dev = cmd - > nq - > dev ;
2019-10-17 14:19:43 -07:00
struct blk_zone * zone = & dev - > zones [ null_zone_no ( dev , sector ) ] ;
2019-08-01 10:26:38 -07:00
size_t i ;
2019-10-27 23:05:49 +09:00
switch ( op ) {
2019-08-01 10:26:38 -07:00
case REQ_OP_ZONE_RESET_ALL :
for ( i = 0 ; i < dev - > nr_zones ; i + + ) {
if ( zone [ i ] . type = = BLK_ZONE_TYPE_CONVENTIONAL )
continue ;
zone [ i ] . cond = BLK_ZONE_COND_EMPTY ;
zone [ i ] . wp = zone [ i ] . start ;
}
break ;
case REQ_OP_ZONE_RESET :
2019-08-22 21:45:18 -07:00
if ( zone - > type = = BLK_ZONE_TYPE_CONVENTIONAL )
return BLK_STS_IOERR ;
2018-07-06 19:38:39 +02:00
2019-08-01 10:26:38 -07:00
zone - > cond = BLK_ZONE_COND_EMPTY ;
zone - > wp = zone - > start ;
break ;
2019-10-27 23:05:49 +09:00
case REQ_OP_ZONE_OPEN :
if ( zone - > type = = BLK_ZONE_TYPE_CONVENTIONAL )
return BLK_STS_IOERR ;
if ( zone - > cond = = BLK_ZONE_COND_FULL )
return BLK_STS_IOERR ;
zone - > cond = BLK_ZONE_COND_EXP_OPEN ;
break ;
case REQ_OP_ZONE_CLOSE :
if ( zone - > type = = BLK_ZONE_TYPE_CONVENTIONAL )
return BLK_STS_IOERR ;
if ( zone - > cond = = BLK_ZONE_COND_FULL )
return BLK_STS_IOERR ;
2019-12-26 15:54:25 +09:00
if ( zone - > wp = = zone - > start )
zone - > cond = BLK_ZONE_COND_EMPTY ;
else
zone - > cond = BLK_ZONE_COND_CLOSED ;
2019-10-27 23:05:49 +09:00
break ;
case REQ_OP_ZONE_FINISH :
if ( zone - > type = = BLK_ZONE_TYPE_CONVENTIONAL )
return BLK_STS_IOERR ;
zone - > cond = BLK_ZONE_COND_FULL ;
zone - > wp = zone - > start + zone - > len ;
break ;
2019-08-01 10:26:38 -07:00
default :
2019-10-10 00:38:13 +09:00
return BLK_STS_NOTSUPP ;
2018-10-30 16:14:05 +09:00
}
2019-08-22 21:45:18 -07:00
return BLK_STS_OK ;
}
2019-08-23 12:49:00 -06:00
blk_status_t null_handle_zoned ( struct nullb_cmd * cmd , enum req_opf op ,
sector_t sector , sector_t nr_sectors )
2019-08-22 21:45:18 -07:00
{
switch ( op ) {
case REQ_OP_WRITE :
return null_zone_write ( cmd , sector , nr_sectors ) ;
case REQ_OP_ZONE_RESET :
case REQ_OP_ZONE_RESET_ALL :
2019-10-27 23:05:49 +09:00
case REQ_OP_ZONE_OPEN :
case REQ_OP_ZONE_CLOSE :
case REQ_OP_ZONE_FINISH :
return null_zone_mgmt ( cmd , op , sector ) ;
2019-08-22 21:45:18 -07:00
default :
return BLK_STS_OK ;
}
2018-07-06 19:38:39 +02:00
}