/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#ifndef _NVME_FABRICS_H
#define _NVME_FABRICS_H 1

#include <linux/in.h>
#include <linux/inet.h>

#define NVMF_MIN_QUEUE_SIZE	16
#define NVMF_MAX_QUEUE_SIZE	1024
#define NVMF_DEF_QUEUE_SIZE	128

#define NVMF_DEF_RECONNECT_DELAY	10
/* default to 600 seconds of reconnect attempts before giving up */
#define NVMF_DEF_CTRL_LOSS_TMO		600

/*
 * Define a host as seen by the target.  We allocate one at boot, but also
 * allow overriding it when creating controllers.  This is both to provide
 * persistence of the Host NQN over multiple boots, and to allow using
 * multiple ones, for example in a container scenario.  Because we must not
 * use different Host NQNs with the same Host ID we generate a Host ID and
 * use this structure to keep track of the relation between the two.
 */
struct nvmf_host {
	struct kref		ref;
	struct list_head	list;
	char			nqn[NVMF_NQN_SIZE];
	uuid_t			id;
};

/**
 * enum nvmf_parsing_opts - used to define the sysfs parsing options used.
 */
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_TRANSPORT	= 1 << 0,
	NVMF_OPT_NQN		= 1 << 1,
	NVMF_OPT_TRADDR		= 1 << 2,
	NVMF_OPT_TRSVCID	= 1 << 3,
	NVMF_OPT_QUEUE_SIZE	= 1 << 4,
	NVMF_OPT_NR_IO_QUEUES	= 1 << 5,
	NVMF_OPT_TL_RETRY_COUNT	= 1 << 6,
	NVMF_OPT_KATO		= 1 << 7,
	NVMF_OPT_HOSTNQN	= 1 << 8,
	NVMF_OPT_RECONNECT_DELAY = 1 << 9,
	NVMF_OPT_HOST_TRADDR	= 1 << 10,
	NVMF_OPT_CTRL_LOSS_TMO	= 1 << 11,
	NVMF_OPT_HOST_ID	= 1 << 12,
	NVMF_OPT_DUP_CONNECT	= 1 << 13,
	NVMF_OPT_DISABLE_SQFLOW = 1 << 14,
	NVMF_OPT_HDR_DIGEST	= 1 << 15,
	NVMF_OPT_DATA_DIGEST	= 1 << 16,
	NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
	NVMF_OPT_NR_POLL_QUEUES	= 1 << 18,
	NVMF_OPT_TOS		= 1 << 19,
};

/**
 * struct nvmf_ctrl_options - Used to hold the options specified
 *			      with the parsing opts enum.
 * @mask:	Used by the fabrics library to parse through sysfs options
 *		on adding an NVMe controller.
 * @transport:	Holds the fabric transport "technology name" (for lack of a
 *		better description) that will be used by an NVMe controller
 *		being added.
 * @subsysnqn:	Holds the fully qualified NQN subsystem name (format defined
 *		in the NVMe specification, "NVMe Qualified Names").
 * @traddr:	The transport-specific TRADDR field for a port on the
 *		subsystem which is adding a controller.
 * @trsvcid:	The transport-specific TRSVCID field for a port on the
 *		subsystem which is adding a controller.
 * @host_traddr: A transport-specific field identifying the NVMe host port
 *		to use for the connection to the controller.
 * @queue_size: Number of IO queue elements.
 * @nr_io_queues: Number of controller IO queues that will be established.
 * @reconnect_delay: Time between two consecutive reconnect attempts.
 * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
 * @duplicate_connect: allow duplicate connections to an already connected
 *		subsystem.
 * @kato:	Keep-alive timeout.
 * @host:	Virtual NVMe host, contains the NQN and Host ID.
 * @max_reconnects: maximum number of allowed reconnect attempts before removing
 *		the controller, (-1) means reconnect forever, zero means remove
 *		immediately.
 * @disable_sqflow: disable controller sq flow control
 * @hdr_digest: generate/verify header digest (TCP)
 * @data_digest: generate/verify data digest (TCP)
 * @nr_write_queues: number of queues for write I/O
 * @nr_poll_queues: number of queues for polling I/O
 * @tos: type of service
 */
struct nvmf_ctrl_options {
	unsigned		mask;
	char			*transport;
	char			*subsysnqn;
	char			*traddr;
	char			*trsvcid;
	char			*host_traddr;
	size_t			queue_size;
	unsigned int		nr_io_queues;
	unsigned int		reconnect_delay;
	bool			discovery_nqn;
	bool			duplicate_connect;
	unsigned int		kato;
	struct nvmf_host	*host;
	int			max_reconnects;
	bool			disable_sqflow;
	bool			hdr_digest;
	bool			data_digest;
	unsigned int		nr_write_queues;
	unsigned int		nr_poll_queues;
	int			tos;
};

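/*
 * A sketch of how these options typically arrive from user space, assuming
 * the usual comma-separated key=value string written to /dev/nvme-fabrics
 * (e.g. by nvme-cli); the authoritative key names live in the option
 * parsing table in fabrics.c, and the values below are purely illustrative:
 *
 *	transport=tcp,traddr=192.168.1.10,trsvcid=4420,
 *	nqn=nqn.2016-06.io.spdk:cnode1,nr_io_queues=8,ctrl_loss_tmo=600
 *
 * Each recognised key sets the corresponding NVMF_OPT_* bit in @mask and
 * fills in the matching member above.
 */
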
/*
 * struct nvmf_transport_ops - used to register a specific
 *			       fabric implementation of NVMe fabrics.
 * @entry:		Used by the fabrics library to add the new
 *			registration entry to its linked-list internal tree.
 * @module:		Transport module reference
 * @name:		Name of the NVMe fabric driver implementation.
 * @required_opts:	sysfs command-line options that must be specified
 *			when adding a new NVMe controller.
 * @allowed_opts:	sysfs command-line options that can be specified
 *			when adding a new NVMe controller.
 * @create_ctrl():	function pointer that points to the transport-specific
 *			routine that starts up that fabric for the purpose of
 *			connection to an NVMe controller using that fabric
 *			technology.
 *
 * Notes:
 *	1. At minimum, 'required_opts' and 'allowed_opts' should
 *	   be set to the same enum parsing options defined earlier.
 *	2. create_ctrl() must be defined (even if it does nothing)
 *	3. struct nvmf_transport_ops must be statically allocated in the
 *	   module's .bss section so that a pure module_get on @module
 *	   prevents the memory from being freed.
 */
struct nvmf_transport_ops {
	struct list_head	entry;
	struct module		*module;
	const char		*name;
	int			required_opts;
	int			allowed_opts;
	struct nvme_ctrl	*(*create_ctrl)(struct device *dev,
					struct nvmf_ctrl_options *opts);
};

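/*
 * A minimal sketch of how a transport driver might use this interface;
 * "nvme_foo" and its callback are hypothetical, not an in-tree transport:
 *
 *	static struct nvme_ctrl *nvme_foo_create_ctrl(struct device *dev,
 *			struct nvmf_ctrl_options *opts)
 *	{
 *		// allocate, set up and start a controller over this fabric
 *	}
 *
 *	static struct nvmf_transport_ops nvme_foo_transport = {
 *		.name		= "foo",
 *		.module		= THIS_MODULE,
 *		.required_opts	= NVMF_OPT_TRADDR,
 *		.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_CTRL_LOSS_TMO,
 *		.create_ctrl	= nvme_foo_create_ctrl,
 *	};
 *
 *	// register from module_init(), unregister from module_exit()
 *	nvmf_register_transport(&nvme_foo_transport);
 *	...
 *	nvmf_unregister_transport(&nvme_foo_transport);
 */
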
static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
			struct nvmf_ctrl_options *opts)
{
	if (ctrl->state == NVME_CTRL_DELETING ||
	    ctrl->state == NVME_CTRL_DEAD ||
	    strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
	    strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
	    memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
		return false;

	return true;
}

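/*
 * Transports typically use the helper above when checking whether a new
 * connect request duplicates an existing controller. A rough sketch,
 * assuming the transport keeps its controllers on a mutex-protected list
 * (all names are illustrative):
 *
 *	static bool nvme_foo_existing_ctrl(struct nvmf_ctrl_options *opts)
 *	{
 *		struct nvme_foo_ctrl *ctrl;
 *		bool found = false;
 *
 *		mutex_lock(&nvme_foo_ctrl_mutex);
 *		list_for_each_entry(ctrl, &nvme_foo_ctrl_list, list) {
 *			found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
 *			if (found)
 *				break;
 *		}
 *		mutex_unlock(&nvme_foo_ctrl_mutex);
 *
 *		return found;
 *	}
 */
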
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts);

static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE ||
		   ctrl->state == NVME_CTRL_DELETING))
		return true;

	return __nvmf_check_ready(ctrl, rq, queue_live);
}

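/*
 * A rough sketch of the intended call site, loosely modelled on how the
 * fabrics transports gate command submission from their ->queue_rq()
 * handlers (surrounding variable names are illustrative; details vary per
 * transport):
 *
 *	if (!nvmf_check_ready(ctrl, rq, queue_ready))
 *		return nvmf_fail_nonready_command(ctrl, rq);
 */
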
#endif /* _NVME_FABRICS_H */