// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

#include "blk.h"

static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}
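
/*
 * Worked example (illustrative; the 524288-sector zone size is an
 * assumption): zone sizes are a power of two, so masking off the low
 * bits rounds a sector down to the start of its zone:
 *
 *	blk_zone_start(q, 600000) == 600000 & ~(524288 - 1) == 524288
 */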

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
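
/*
 * Sketch of the intended calling pattern (illustrative only; the
 * blk_req_zone_write_lock()/blk_req_zone_write_unlock() wrappers in
 * linux/blkdev.h wrap the __ variants with the checks shown here):
 *
 *	if (blk_req_needs_zone_write_lock(rq))
 *		__blk_req_zone_write_lock(rq);
 *	...dispatch rq, keeping a single write in flight per zone...
 *	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
 *		__blk_req_zone_write_unlock(rq);
 */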

static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
					     sector_t nr_sectors)
{
	unsigned long zone_sectors = blk_queue_zone_sectors(q);

	return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}
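
/*
 * Worked example (illustrative): with zone_sectors = 524288 (2^19), a
 * device of 1000000 sectors spans (1000000 + 524287) >> 19 = 2 zones:
 * a full zone 0 plus a smaller last zone of 475712 sectors.
 */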

/**
 * blkdev_nr_zones - Get number of zones
 * @bdev:	Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
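
/*
 * Example usage (illustrative sketch; "zinfo" is a hypothetical caller-side
 * array, not part of this API): size a per-zone array with this helper:
 *
 *	unsigned int nr = blkdev_nr_zones(bdev);
 *	struct blk_zone *zinfo = NULL;
 *
 *	if (nr)
 *		zinfo = kvcalloc(nr, sizeof(*zinfo), GFP_KERNEL);
 */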

/*
 * Check that a zone report belongs to this partition, and if yes, fix its
 * start sector and write pointer and return true. Return false otherwise.
 */
static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
{
	sector_t offset = get_start_sect(bdev);

	if (rep->start < offset)
		return false;

	rep->start -= offset;
	if (rep->start + rep->len > bdev->bd_part->nr_sects)
		return false;

	if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
		rep->wp = rep->start + rep->len;
	else
		rep->wp -= offset;

	return true;
}

static int blk_report_zones(struct gendisk *disk, sector_t sector,
			    struct blk_zone *zones, unsigned int *nr_zones,
			    gfp_t gfp_mask)
{
	struct request_queue *q = disk->queue;
	unsigned int z = 0, n, nrz = *nr_zones;
	sector_t capacity = get_capacity(disk);
	int ret;

	while (z < nrz && sector < capacity) {
		n = nrz - z;
		ret = disk->fops->report_zones(disk, sector, &zones[z], &n,
					       gfp_mask);
		if (ret)
			return ret;
		if (!n)
			break;
		sector += blk_queue_zone_sectors(q) * n;
		z += n;
	}

	WARN_ON(z > *nr_zones);
	*nr_zones = z;

	return 0;
}

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @zones:	Array of zone structures where to return the zones information
 * @nr_zones:	Number of zone structures in the zone array
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Get zone information starting from the zone containing @sector.
 *    The number of zone information reported may be less than the number
 *    requested by @nr_zones. The number of zones actually reported is
 *    returned in @nr_zones.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			struct blk_zone *zones, unsigned int *nr_zones,
			gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int i, nrz;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	/*
	 * A block device that advertised itself as zoned must have a
	 * report_zones method. If it does not have one defined, the device
	 * driver has a bug. So warn about that.
	 */
	if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
		*nr_zones = 0;
		return 0;
	}

	nrz = min(*nr_zones,
		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
	ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
			       zones, &nrz, gfp_mask);
	if (ret)
		return ret;

	for (i = 0; i < nrz; i++) {
		if (!blkdev_report_zone(bdev, zones))
			break;
		zones++;
	}

	*nr_zones = i;

	return 0;
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
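
/*
 * Example usage (illustrative sketch; the fixed 16-zone buffer and the
 * pr_info() reporting are assumptions, not part of this API):
 *
 *	struct blk_zone zones[16];
 *	unsigned int nr_zones = ARRAY_SIZE(zones);
 *	int ret;
 *
 *	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
 *	if (!ret && nr_zones)
 *		pr_info("first zone: start %llu, len %llu\n",
 *			(unsigned long long)zones[0].start,
 *			(unsigned long long)zones[0].len);
 */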

/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector + @nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	blk_start_plug(&plug);
	while (sector < end_sector) {

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);

		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_reset_zones);
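
/*
 * Example usage (illustrative sketch): on a device with no conventional
 * zones, the write pointer of every zone can be reset in one call by
 * specifying the entire disk sector range, as documented above:
 *
 *	ret = blkdev_reset_zones(bdev, 0, bdev->bd_part->nr_sects, GFP_KERNEL);
 */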

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_report rep;
	struct blk_zone *zones;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);

	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
			       GFP_KERNEL | __GFP_ZERO);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, rep.sector,
				  zones, &rep.nr_zones,
				  GFP_KERNEL);
	if (ret)
		goto out;

	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
		ret = -EFAULT;
		goto out;
	}

	if (rep.nr_zones) {
		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
				 sizeof(struct blk_zone) * rep.nr_zones))
			ret = -EFAULT;
	}

out:
	kvfree(zones);

	return ret;
}
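
/*
 * Userspace view of BLKREPORTZONE (illustrative sketch; the 16-zone buffer
 * size and the bare ioctl() call are assumptions): the argument is a
 * struct blk_zone_report immediately followed by rep->nr_zones
 * struct blk_zone entries:
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	ioctl(fd, BLKREPORTZONE, rep);
 */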

/*
 * BLKRESETZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
				  GFP_KERNEL);
}
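
/*
 * Userspace view of BLKRESETZONE (illustrative sketch; the 524288-sector
 * zone size is an assumption): reset the write pointer of a single zone:
 *
 *	struct blk_zone_range zrange = {
 *		.sector		= 524288,
 *		.nr_sectors	= 524288,
 *	};
 *
 *	ioctl(fd, BLKRESETZONE, &zrange);
 */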

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

/*
 * Allocate an array of struct blk_zone to get nr_zones zone information.
 * The allocated array may be smaller than nr_zones.
 */
static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
{
	size_t size = *nr_zones * sizeof(struct blk_zone);
	struct page *page;
	int order;

	for (order = get_order(size); order >= 0; order--) {
		page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
		if (page) {
			*nr_zones = min_t(unsigned int, *nr_zones,
				(PAGE_SIZE << order) / sizeof(struct blk_zone));
			return page_address(page);
		}
	}

	return NULL;
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be
 * called within the disk->revalidate method. For BIO based queues, no zone
 * bitmap is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
	struct blk_zone *zones = NULL;
	sector_t sector = 0;
	int ret = 0;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_mq(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	if (!blk_queue_is_zoned(q) || !nr_zones) {
		nr_zones = 0;
		goto update;
	}

	/* Allocate bitmaps */
	ret = -ENOMEM;
	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_wlock)
		goto out;
	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_bitmap)
		goto out;

	/* Get zone information and initialize seq_zones_bitmap */
	rep_nr_zones = nr_zones;
	zones = blk_alloc_zones(q->node, &rep_nr_zones);
	if (!zones)
		goto out;

	while (z < nr_zones) {
		nrz = min(nr_zones - z, rep_nr_zones);
		ret = blk_report_zones(disk, sector, zones, &nrz, GFP_NOIO);
		if (ret)
			goto out;
		if (!nrz)
			break;
		for (i = 0; i < nrz; i++) {
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(z, seq_zones_bitmap);
			z++;
		}
		sector += nrz * blk_queue_zone_sectors(q);
	}

	if (WARN_ON(z != nr_zones)) {
		ret = -EIO;
		goto out;
	}

update:
	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, seq_zones_wlock);
	swap(q->seq_zones_bitmap, seq_zones_bitmap);
	blk_mq_unfreeze_queue(q);

out:
	free_pages((unsigned long)zones,
		   get_order(rep_nr_zones * sizeof(struct blk_zone)));
	kfree(seq_zones_wlock);
	kfree(seq_zones_bitmap);

	if (ret) {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_mq_freeze_queue(q);
		blk_queue_free_zone_bitmaps(q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
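
/*
 * Example usage (illustrative sketch; mydrv_revalidate() is a hypothetical
 * driver callback, not part of this file): a low-level driver would call
 * this once the device zone model and zone size are known:
 *
 *	static int mydrv_revalidate(struct gendisk *disk)
 *	{
 *		...update capacity and queue zone limits here...
 *		return blk_revalidate_disk_zones(disk);
 *	}
 */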