/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,
	SRP_ABORT_TIMEOUT_MS	= 5000,

	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,
	SRP_STALE_CONN		= 3,

	SRP_MAX_LUN		= 512,
	SRP_DEF_SG_TABLESIZE	= 12,

	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_CMD_SQ_SIZE	= SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_TAG_NO_REQ		= ~0U,
	SRP_TAG_TSK_MGMT	= 1U << 31,

	SRP_MAX_PAGES_PER_MR	= 512,

	LOCAL_INV_WR_ID_MASK	= 1,
	FAST_REG_WR_ID_MASK	= 2,
};
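
/*
 * Worked example of the derived constant above: with the default queue
 * size of 1 << 6 == 64 entries, reserving one send-queue slot for RSP
 * traffic and one for task management leaves
 * SRP_DEFAULT_CMD_SQ_SIZE == 64 - 1 - 1 == 62 slots for SCSI commands.
 */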

enum srp_target_state {
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size:  Maximum size in bytes of a single FMR/FR registration
 *                request.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fmr;
	bool			has_fr;
	bool			use_fast_reg;
};
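
/*
 * Illustrative sketch (not part of this header; the local variable names
 * are assumptions): the driver typically derives the registration page
 * geometry above from the HCA device attributes roughly as follows, where
 * attr->page_size_cap comes from struct ib_device_attr:
 *
 *	mr_page_shift         = max(12, ffs(attr->page_size_cap) - 1);
 *	srp_dev->mr_page_size = 1 << mr_page_shift;
 *	srp_dev->mr_page_mask = ~((u64)srp_dev->mr_page_size - 1);
 *	srp_dev->mr_max_size  = srp_dev->mr_page_size *
 *				srp_dev->max_pages_per_mr;
 */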

struct srp_host {
	struct srp_device	*srp_dev;
	u8			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct completion	released;
	struct list_head	list;
	struct mutex		add_target_mutex;
};

struct srp_request {
	struct list_head	list;
	struct scsi_cmnd	*scmnd;
	struct srp_iu		*cmd;
	union {
		struct ib_pool_fmr **fmr_list;
		struct srp_fr_desc **fr_list;
	};
	u64			*map_page;
	struct srp_direct_buf	*indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	short			index;
};
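
/*
 * Note (descriptive, not taken from the original header text): which member
 * of the anonymous union above is valid presumably tracks
 * srp_device.use_fast_reg, i.e. @fr_list when fast registration is in use
 * and @fmr_list when the FMR pool is used.
 */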

struct srp_target_port {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	struct list_head	free_reqs;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct ib_cq		*send_cq ____cacheline_aligned_in_smp;
	struct ib_cq		*recv_cq;
	struct ib_qp		*qp;
	union {
		struct ib_fmr_pool	*fmr_pool;
		struct srp_fr_pool	*fr_pool;
	};
	u32			lkey;
	u32			rkey;
	enum srp_target_state	state;
	unsigned int		max_iu_len;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	__be64			id_ext;
	__be64			ioc_guid;
	__be64			service_id;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host		*srp_host;
	struct Scsi_Host	*scsi_host;
	struct srp_rport	*rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	int			queue_size;
	int			req_ring_size;
	int			comp_vector;
	int			tl_retry_count;

	struct ib_sa_path_rec	path;
	__be16			orig_dgid[8];
	struct ib_sa_query	*path_query;
	int			path_query_id;

	u32			rq_tmo_jiffies;
	bool			connected;

	struct ib_cm_id		*cm_id;

	int			max_ti_iu_len;
	int			zero_req_lim;

	struct srp_iu		**tx_ring;
	struct srp_iu		**rx_ring;
	struct srp_request	*req_ring;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	struct completion	done;
	int			status;
	bool			qp_in_error;

	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
};
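
/*
 * Minimal sketch (hypothetical local variables; "target" stands for a
 * struct srp_target_port pointer) of how the hot-path members above are
 * meant to be used together: a free request slot is taken from @free_reqs
 * under @lock while the SRP credit count @req_lim is consulted:
 *
 *	struct srp_request *req = NULL;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&target->lock, flags);
 *	if (target->req_lim > 0 && !list_empty(&target->free_reqs)) {
 *		req = list_first_entry(&target->free_reqs,
 *				       struct srp_request, list);
 *		list_del(&req->list);
 *	}
 *	spin_unlock_irqrestore(&target->lock, flags);
 */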

struct srp_iu {
	struct list_head	list;
	u64			dma;
	void			*buf;
	size_t			size;
	enum dma_data_direction	direction;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 * @frpl:  Fast registration page list.
 */
struct srp_fr_desc {
	struct list_head		entry;
	struct ib_mr			*mr;
	struct ib_fast_reg_page_list	*frpl;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[0];
};
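
/*
 * Minimal sketch (hypothetical helper, not declared in this header) of how
 * a descriptor can be taken from the pool; per the rule above, an entry is
 * available if and only if it is still on @free_list:
 *
 *	static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
 *	{
 *		struct srp_fr_desc *d = NULL;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&pool->lock, flags);
 *		if (!list_empty(&pool->free_list)) {
 *			d = list_first_entry(&pool->free_list,
 *					     struct srp_fr_desc, entry);
 *			list_del(&d->entry);
 *		}
 *		spin_unlock_irqrestore(&pool->lock, flags);
 *		return d;
 *	}
 */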

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:           Pointer to the element of the SRP buffer descriptor array
 *                  that is being filled in.
 * @pages:          Array with DMA addresses of pages being considered for
 *                  memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:        Number of bytes that will be registered with the next
 *                  FMR or FR memory registration call.
 * @total_len:      Total number of bytes in the sg-list being mapped.
 * @npages:         Number of page addresses in the pages[] array.
 * @nmdesc:         Number of FMR or FR memory descriptors used for mapping.
 * @ndesc:          Number of SRP buffer descriptors that have been filled in.
 * @unmapped_sg:    First element of the sg-list that is mapped via FMR or FR.
 * @unmapped_index: Index of the first element mapped via FMR or FR.
 * @unmapped_addr:  DMA address of the first element mapped via FMR or FR.
 */
struct srp_map_state {
	union {
		struct ib_pool_fmr **next_fmr;
		struct srp_fr_desc **next_fr;
	};
	struct srp_direct_buf	*desc;
	u64			*pages;
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
	struct scatterlist	*unmapped_sg;
	int			unmapped_index;
	dma_addr_t		unmapped_addr;
};
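
/*
 * Rough usage pattern (descriptive summary, not taken from this header):
 * the mapping code is expected to walk the scatterlist, accumulating page
 * DMA addresses in @pages / @npages until the per-MR limit is reached,
 * issue one FMR or FR registration (bumping @nmdesc and advancing the
 * next_fmr / next_fr cursor), fill in the next SRP buffer descriptor via
 * @desc / @ndesc, and add @dma_len to @total_len before starting the next
 * chunk.
 */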

#endif /* IB_SRP_H */