/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
enum {
SRP_PATH_REC_TIMEOUT_MS = 1000 ,
SRP_ABORT_TIMEOUT_MS = 5000 ,
SRP_PORT_REDIRECT = 1 ,
SRP_DLID_REDIRECT = 2 ,
2008-01-09 01:08:52 +03:00
SRP_STALE_CONN = 3 ,
2005-11-03 01:07:13 +03:00
2006-06-18 07:37:32 +04:00
SRP_DEF_SG_TABLESIZE = 12 ,
2005-11-03 01:07:13 +03:00
2013-10-26 16:40:37 +04:00
SRP_DEFAULT_QUEUE_SIZE = 1 < < 6 ,
2010-08-30 23:27:20 +04:00
SRP_RSP_SQ_SIZE = 1 ,
SRP_TSK_MGMT_SQ_SIZE = 1 ,
2013-10-26 16:40:37 +04:00
SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
SRP_TSK_MGMT_SQ_SIZE ,
2005-11-03 01:07:13 +03:00
2010-11-26 21:02:21 +03:00
SRP_TAG_NO_REQ = ~ 0U ,
SRP_TAG_TSK_MGMT = 1U < < 31 ,
2006-06-18 07:37:29 +04:00
2014-05-20 17:07:45 +04:00
SRP_MAX_PAGES_PER_MR = 512 ,
2018-12-18 00:20:35 +03:00
SRP_MAX_ADD_CDB_LEN = 16 ,
2018-12-18 00:20:39 +03:00
SRP_MAX_IMM_SGE = 2 ,
SRP_MAX_SGE = SRP_MAX_IMM_SGE + 1 ,
/*
* Choose the immediate data offset such that a 32 byte CDB still fits .
*/
SRP_IMM_DATA_OFFSET = sizeof ( struct srp_cmd ) +
SRP_MAX_ADD_CDB_LEN +
sizeof ( struct srp_imm_buf ) ,
2005-11-03 01:07:13 +03:00
} ;
/* State of an SRP target port (see struct srp_target_port.state). */
enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};
2010-10-08 22:40:47 +04:00
/* Type of an SRP information unit (IU). */
enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};
2014-05-20 17:08:34 +04:00
/*
* @ mr_page_mask : HCA memory registration page mask .
* @ mr_page_size : HCA memory registration page size .
2020-05-28 22:45:44 +03:00
* @ mr_max_size : Maximum size in bytes of a single FR registration request .
2014-05-20 17:08:34 +04:00
*/
2006-06-18 07:37:29 +04:00
struct srp_device {
struct list_head dev_list ;
2005-11-03 01:07:13 +03:00
struct ib_device * dev ;
struct ib_pd * pd ;
2017-10-11 20:27:28 +03:00
u32 global_rkey ;
2014-05-20 17:07:45 +04:00
u64 mr_page_mask ;
int mr_page_size ;
int mr_max_size ;
int max_pages_per_mr ;
2014-05-20 17:08:34 +04:00
bool has_fr ;
bool use_fast_reg ;
2006-06-18 07:37:29 +04:00
} ;
struct srp_host {
2008-03-06 02:13:36 +03:00
struct srp_device * srp_dev ;
2006-06-18 07:37:29 +04:00
u8 port ;
2008-02-22 02:13:36 +03:00
struct device dev ;
2005-11-03 01:07:13 +03:00
struct list_head target_list ;
2006-06-18 07:37:30 +04:00
spinlock_t target_lock ;
2005-11-03 01:07:13 +03:00
struct completion released ;
struct list_head list ;
2014-03-14 16:52:45 +04:00
struct mutex add_target_mutex ;
2005-11-03 01:07:13 +03:00
} ;
struct srp_request {
struct scsi_cmnd * scmnd ;
struct srp_iu * cmd ;
2020-05-28 22:45:44 +03:00
struct srp_fr_desc * * fr_list ;
2011-01-16 21:57:10 +03:00
struct srp_direct_buf * indirect_desc ;
dma_addr_t indirect_dma_addr ;
2014-05-20 17:07:45 +04:00
short nmdesc ;
2015-11-13 14:57:13 +03:00
struct ib_cqe reg_cqe ;
2005-11-03 01:07:13 +03:00
} ;
2014-10-30 16:48:30 +03:00
/**
* struct srp_rdma_ch
* @ comp_vector : Completion vector used by this RDMA channel .
2018-12-18 00:20:38 +03:00
* @ max_it_iu_len : Maximum initiator - to - target information unit length .
2018-12-18 00:20:37 +03:00
* @ max_ti_iu_len : Maximum target - to - initiator information unit length .
2014-10-30 16:48:30 +03:00
*/
struct srp_rdma_ch {
2010-11-26 23:34:46 +03:00
/* These are RW in the hot path, and commonly used together */
struct list_head free_tx ;
spinlock_t lock ;
s32 req_lim ;
/* These are read-only in the hot path */
2014-10-30 16:48:30 +03:00
struct srp_target_port * target ____cacheline_aligned_in_smp ;
struct ib_cq * send_cq ;
2010-11-26 23:34:46 +03:00
struct ib_cq * recv_cq ;
struct ib_qp * qp ;
2020-05-28 22:45:44 +03:00
struct srp_fr_pool * fr_pool ;
2018-12-18 00:20:38 +03:00
uint32_t max_it_iu_len ;
2018-12-18 00:20:37 +03:00
uint32_t max_ti_iu_len ;
2019-10-01 02:16:59 +03:00
u8 max_imm_sge ;
2018-12-18 00:20:39 +03:00
bool use_imm_data ;
2014-10-30 16:48:30 +03:00
/* Everything above this point is used in the hot path of
* command processing . Try to keep them packed into cachelines .
*/
struct completion done ;
int status ;
2018-01-23 01:27:12 +03:00
union {
struct ib_cm {
struct sa_path_rec path ;
struct ib_sa_query * path_query ;
int path_query_id ;
struct ib_cm_id * cm_id ;
} ib_cm ;
struct rdma_cm {
struct rdma_cm_id * cm_id ;
} rdma_cm ;
} ;
2014-10-30 16:48:30 +03:00
struct srp_iu * * tx_ring ;
struct srp_iu * * rx_ring ;
struct srp_request * req_ring ;
int comp_vector ;
2017-02-14 21:56:31 +03:00
u64 tsk_mgmt_tag ;
2014-10-30 16:48:30 +03:00
struct completion tsk_mgmt_done ;
u8 tsk_mgmt_status ;
2015-05-18 14:23:57 +03:00
bool connected ;
2014-10-30 16:48:30 +03:00
} ;
/**
* struct srp_target_port
* @ comp_vector : Completion vector used by the first RDMA channel created for
* this target port .
*/
struct srp_target_port {
/* read and written in the hot path */
spinlock_t lock ;
/* read only in the hot path */
2017-10-11 20:27:28 +03:00
u32 global_rkey ;
2014-10-06 19:14:36 +04:00
struct srp_rdma_ch * ch ;
2018-01-23 01:27:12 +03:00
struct net * net ;
2014-10-06 19:14:36 +04:00
u32 ch_count ;
2010-11-26 23:34:46 +03:00
u32 lkey ;
enum srp_target_state state ;
RDMA/srp: Add parse function for maximum initiator to target IU size
According to SRP specifications 'srp-r16a' and 'srp2r06',
IOControllerProfile attributes for SRP target port include the maximum
initiator to target IU size.
SRP connection daemons, such as srp_daemon, can get the value from the
subnet manager. The SRP connection daemon can pass this value to kernel.
This patch adds a parse function for it.
Upstream commit [1] enables the kernel parameter, 'use_imm_data', by
default. [1] also use (8 * 1024) as the default value for kernel parameter
'max_imm_data'. With those default values, the maximum initiator to target
IU size will be 8260.
In case the SRPT modules, which include the in-tree 'ib_srpt.ko' module,
do not support SRP-2 'immediate data' feature, the default maximum
initiator to target IU size is significantly smaller than 8260. For
'ib_srpt.ko' module, which built from source before [2], the default
maximum initiator to target IU is 2116.
[1] introduces a regression issue for old srp targets with default kernel
parameters, as the connection will be rejected because of a too large
maximum initiator to target IU size.
[1] commit 882981f4a411 ("RDMA/srp: Add support for immediate data")
[2] commit 5dabcd0456d7 ("RDMA/srpt: Add support for immediate data")
Link: https://lore.kernel.org/r/20190927174352.7800-1-honli@redhat.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Honggang Li <honli@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2019-09-27 20:43:51 +03:00
uint32_t max_it_iu_size ;
2011-01-15 02:23:24 +03:00
unsigned int cmd_sg_cnt ;
2011-01-16 21:57:10 +03:00
unsigned int indirect_size ;
bool allow_ext_sg ;
2010-11-26 23:34:46 +03:00
2014-10-30 16:48:30 +03:00
/* other member variables */
2014-10-30 16:48:05 +03:00
union ib_gid sgid ;
2005-11-03 01:07:13 +03:00
__be64 id_ext ;
__be64 ioc_guid ;
2006-10-04 17:28:56 +04:00
__be64 initiator_ext ;
2006-06-18 07:37:38 +04:00
u16 io_class ;
2005-11-03 01:07:13 +03:00
struct srp_host * srp_host ;
struct Scsi_Host * scsi_host ;
2013-10-26 16:32:30 +04:00
struct srp_rport * rport ;
2005-11-03 01:07:13 +03:00
char target_name [ 32 ] ;
unsigned int scsi_id ;
2011-01-16 21:57:10 +03:00
unsigned int sg_tablesize ;
2018-01-23 01:27:13 +03:00
unsigned int target_can_queue ;
2016-04-23 00:13:57 +03:00
int mr_pool_size ;
2016-05-12 20:50:35 +03:00
int mr_per_cmd ;
2013-10-26 16:40:37 +04:00
int queue_size ;
int req_ring_size ;
2013-06-28 16:57:42 +04:00
int comp_vector ;
2013-10-26 16:31:27 +04:00
int tl_retry_count ;
2005-11-03 01:07:13 +03:00
2018-01-23 01:27:12 +03:00
bool using_rdma_cm ;
union {
struct {
__be64 service_id ;
union ib_gid orig_dgid ;
__be16 pkey ;
} ib_cm ;
struct {
union {
struct sockaddr_in ip4 ;
struct sockaddr_in6 ip6 ;
2019-10-01 02:16:58 +03:00
struct sockaddr sa ;
2018-01-23 01:27:12 +03:00
struct sockaddr_storage ss ;
} src ;
union {
struct sockaddr_in ip4 ;
struct sockaddr_in6 ip6 ;
2019-10-01 02:16:58 +03:00
struct sockaddr sa ;
2018-01-23 01:27:12 +03:00
struct sockaddr_storage ss ;
} dst ;
bool src_specified ;
} rdma_cm ;
} ;
2005-11-03 01:07:13 +03:00
2011-09-03 11:34:48 +04:00
u32 rq_tmo_jiffies ;
2006-06-18 07:37:33 +04:00
int zero_req_lim ;
2013-10-26 16:35:08 +04:00
struct work_struct tl_err_work ;
2011-12-26 20:49:18 +04:00
struct work_struct remove_work ;
2005-11-03 01:07:13 +03:00
struct list_head list ;
2011-09-03 11:25:42 +04:00
bool qp_in_error ;
2005-11-03 01:07:13 +03:00
} ;
struct srp_iu {
2010-11-26 21:22:48 +03:00
struct list_head list ;
2006-12-13 01:30:55 +03:00
u64 dma ;
2005-11-03 01:07:13 +03:00
void * buf ;
size_t size ;
enum dma_data_direction direction ;
2018-12-18 00:20:39 +03:00
u32 num_sge ;
struct ib_sge sge [ SRP_MAX_SGE ] ;
2015-11-13 14:57:13 +03:00
struct ib_cqe cqe ;
2005-11-03 01:07:13 +03:00
} ;
2014-05-20 17:08:34 +04:00
/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 */
struct srp_fr_desc {
	struct list_head	entry;
	struct ib_mr	       *mr;
};
/**
* struct srp_fr_pool - pool of fast registration descriptors
*
* An entry is available for allocation if and only if it occurs in @ free_list .
*
* @ size : Number of descriptors in this pool .
* @ max_page_list_len : Maximum fast registration work request page list length .
* @ lock : Protects free_list .
* @ free_list : List of free descriptors .
* @ desc : Fast registration descriptor pool .
*/
struct srp_fr_pool {
int size ;
int max_page_list_len ;
spinlock_t lock ;
struct list_head free_list ;
2020-02-13 04:04:25 +03:00
struct srp_fr_desc desc [ ] ;
2014-05-20 17:08:34 +04:00
} ;
/**
* struct srp_map_state - per - request DMA memory mapping state
* @ desc : Pointer to the element of the SRP buffer descriptor array
* that is being filled in .
* @ pages : Array with DMA addresses of pages being considered for
* memory registration .
* @ base_dma_addr : DMA address of the first page that has not yet been mapped .
2020-05-28 22:45:44 +03:00
* @ dma_len : Number of bytes that will be registered with the next FR
* memory registration call .
2014-05-20 17:08:34 +04:00
* @ total_len : Total number of bytes in the sg - list being mapped .
* @ npages : Number of page addresses in the pages [ ] array .
2020-05-28 22:45:44 +03:00
* @ nmdesc : Number of FR memory descriptors used for mapping .
2014-05-20 17:08:34 +04:00
* @ ndesc : Number of SRP buffer descriptors that have been filled in .
*/
2011-01-15 03:45:50 +03:00
struct srp_map_state {
2014-05-20 17:08:34 +04:00
union {
2015-08-11 03:07:27 +03:00
struct {
struct srp_fr_desc * * next ;
struct srp_fr_desc * * end ;
} fr ;
2015-08-11 03:09:05 +03:00
struct {
void * * next ;
void * * end ;
} gen ;
2014-05-20 17:08:34 +04:00
} ;
2011-01-15 03:45:50 +03:00
struct srp_direct_buf * desc ;
2015-10-13 19:11:39 +03:00
union {
u64 * pages ;
struct scatterlist * sg ;
} ;
2011-01-15 03:45:50 +03:00
dma_addr_t base_dma_addr ;
2014-05-20 17:07:45 +04:00
u32 dma_len ;
2011-01-15 03:45:50 +03:00
u32 total_len ;
2015-12-01 21:19:38 +03:00
unsigned int npages ;
2014-05-20 17:07:45 +04:00
unsigned int nmdesc ;
2011-01-15 03:45:50 +03:00
unsigned int ndesc ;
} ;
#endif /* IB_SRP_H */