/* SPDX-License-Identifier: GPL-2.0 */
# ifndef __BLK_NULL_BLK_H
# define __BLK_NULL_BLK_H
# undef pr_fmt
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
# include <linux/blkdev.h>
# include <linux/slab.h>
# include <linux/blk-mq.h>
# include <linux/hrtimer.h>
# include <linux/configfs.h>
# include <linux/badblocks.h>
# include <linux/fault-inject.h>
/*
 * Per-command context. A command tracks either an rq or a bio depending
 * on the configured queue_mode (see nullb_device.queue_mode).
 */
struct nullb_cmd {
	struct request *rq;	/* request being serviced — NOTE(review): presumably valid only in request-based modes; confirm */
	struct bio *bio;	/* bio being serviced — presumably the bio-based mode counterpart of rq; confirm */
	unsigned int tag;	/* tag identifying this command */
	blk_status_t error;	/* completion status to report */
	struct nullb_queue *nq;	/* queue this command belongs to */
	struct hrtimer timer;	/* timer for delayed completion — presumably paired with completion_nsec; confirm */
};
/* Per-queue state: tag accounting and the preallocated command array. */
struct nullb_queue {
	unsigned long *tag_map;		/* presumably a bitmap of in-use tags — confirm against driver */
	wait_queue_head_t wait;		/* waiters, presumably for a free tag — confirm */
	unsigned int queue_depth;	/* number of commands this queue holds */
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* NOTE(review): looks like a requeue round-robin counter — confirm */
	struct nullb_cmd *cmds;		/* command array of queue_depth entries — TODO confirm size */
};
struct nullb_device {
struct nullb * nullb ;
struct config_item item ;
struct radix_tree_root data ; /* data stored in the disk */
struct radix_tree_root cache ; /* disk cache data */
unsigned long flags ; /* device flags */
unsigned int curr_cache ;
struct badblocks badblocks ;
2018-07-06 20:38:39 +03:00
unsigned int nr_zones ;
2020-08-28 13:54:00 +03:00
unsigned int nr_zones_imp_open ;
unsigned int nr_zones_exp_open ;
unsigned int nr_zones_closed ;
2018-07-06 20:38:39 +03:00
struct blk_zone * zones ;
sector_t zone_size_sects ;
2020-11-06 14:01:41 +03:00
spinlock_t zone_lock ;
2020-10-29 14:05:00 +03:00
unsigned long * zone_locks ;
2018-07-06 20:38:39 +03:00
2018-07-06 20:38:38 +03:00
unsigned long size ; /* device size in MB */
unsigned long completion_nsec ; /* time in ns to complete a request */
unsigned long cache_size ; /* disk cache size in MB */
2018-07-06 20:38:39 +03:00
unsigned long zone_size ; /* zone size in MB if device is zoned */
2020-06-29 22:06:38 +03:00
unsigned long zone_capacity ; /* zone capacity in MB if device is zoned */
2018-10-30 10:14:05 +03:00
unsigned int zone_nr_conv ; /* number of conventional zones */
2020-08-28 13:54:00 +03:00
unsigned int zone_max_open ; /* max number of open zones */
unsigned int zone_max_active ; /* max number of active zones */
2018-07-06 20:38:38 +03:00
unsigned int submit_queues ; /* number of submission queues */
unsigned int home_node ; /* home node for the device */
unsigned int queue_mode ; /* block interface */
unsigned int blocksize ; /* block size */
unsigned int irqmode ; /* IRQ completion handler */
unsigned int hw_queue_depth ; /* queue depth */
unsigned int index ; /* index of the disk, only valid with a disk */
unsigned int mbps ; /* Bandwidth throttle cap (in MB/s) */
bool blocking ; /* blocking blk-mq device */
bool use_per_node_hctx ; /* use per-node allocation for hardware context */
bool power ; /* power on/off the device */
bool memory_backed ; /* if data is stored in memory */
bool discard ; /* if support discard */
2018-07-06 20:38:39 +03:00
bool zoned ; /* if device is zoned */
2018-07-06 20:38:38 +03:00
} ;
/* A live null_blk instance: gendisk, queues, and runtime bookkeeping. */
struct nullb {
	struct nullb_device *dev;	/* configuration/state this instance was built from */
	struct list_head list;		/* linkage, presumably on a global device list — confirm */
	unsigned int index;		/* instance index */
	struct request_queue *q;	/* block request queue */
	struct gendisk *disk;		/* the exposed disk */
	struct blk_mq_tag_set *tag_set;	/* active tag set — may point at __tag_set below? TODO confirm */
	struct blk_mq_tag_set __tag_set;	/* embedded tag set storage */
	unsigned int queue_depth;	/* depth of each queue */
	atomic_long_t cur_bytes;	/* NOTE(review): looks like the bandwidth-throttle byte budget (see mbps/bw_timer) — confirm */
	struct hrtimer bw_timer;	/* timer, presumably for bandwidth throttling — confirm */
	unsigned long cache_flush_pos;	/* cache flush position — TODO confirm units/meaning */
	spinlock_t lock;		/* protects this instance — TODO confirm exact coverage */
	struct nullb_queue *queues;	/* array of nr_queues queues */
	unsigned int nr_queues;		/* number of entries in queues */
	char disk_name[DISK_NAME_LEN];	/* name of the exposed disk */
};
/* Execute one command against the (non-zoned) backing store. */
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_opf op,
			      sector_t sector, unsigned int nr_sectors);

#ifdef CONFIG_BLK_DEV_ZONED
/* Set up zoned-device state on dev for request queue q. */
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
/* Register the zoned device with the block layer. */
int null_register_zoned_dev(struct nullb *nullb);
/* Tear down state created by null_init_zoned_dev(). */
void null_free_zoned_dev(struct nullb_device *dev);
/* report_zones method for the gendisk (see also the NULL stub below). */
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);
/* Execute one command with zone-state checking/updating. */
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors);
/* Clamp a read starting at sector to the readable length within its zone. */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
# else
static inline int null_init_zoned_dev ( struct nullb_device * dev ,
struct request_queue * q )
2018-07-06 20:38:39 +03:00
{
2019-09-16 17:07:59 +03:00
pr_err ( " CONFIG_BLK_DEV_ZONED not enabled \n " ) ;
2018-07-06 20:38:39 +03:00
return - EINVAL ;
}
/*
 * Stub used when CONFIG_BLK_DEV_ZONED is off: there is no zoned device
 * to register, so always report that no such device exists.
 */
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
/* Stub: nothing was allocated by the !ZONED null_init_zoned_dev(), so nothing to free. */
static inline void null_free_zoned_dev(struct nullb_device *dev)
{
}
/*
 * Stub used when CONFIG_BLK_DEV_ZONED is off: zone commands are not
 * supported, so fail every one with BLK_STS_NOTSUPP.
 */
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
						  enum req_opf op,
						  sector_t sector,
						  sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
/*
 * Stub used when CONFIG_BLK_DEV_ZONED is off: with no zones there is no
 * write pointer to clamp against, so the full requested length is readable.
 */
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
# define null_report_zones NULL
# endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */