/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
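
/*
 * Illustrative sketch: the MPT flag bits above mirror the IB access
 * flags from <rdma/ib_verbs.h>, so the hardware flags word for a
 * memory region can be built from an ib_access_flags mask along these
 * lines.  The helper name here is hypothetical, not a declaration from
 * this header:
 *
 *	static inline u32 mpt_flags_from_access(int acc)
 *	{
 *		return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
 *		       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
 *		       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
 *		       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
 *		       MTHCA_MPT_FLAG_LOCAL_READ;
 *	}
 */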

struct mthca_buf_list {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};
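
/*
 * A buffer is either one physically contiguous allocation ("direct")
 * or a list of PAGE_SIZE chunks.  A minimal sketch of addressing entry
 * n in either layout, assumed for illustration (it mirrors the
 * get_wqe() helpers in the .c files; the function name is
 * hypothetical):
 *
 *	static inline void *buf_entry(union mthca_buf *buf, int is_direct,
 *				      int n, int entry_shift)
 *	{
 *		if (is_direct)
 *			return buf->direct.buf + (n << entry_shift);
 *		return buf->page_list[(n << entry_shift) >> PAGE_SHIFT].buf +
 *			((n << entry_shift) & (PAGE_SIZE - 1));
 *	}
 */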

struct mthca_uar {
	unsigned long pfn;
	int           index;
};

struct mthca_user_db_table;

struct mthca_ucontext {
	struct ib_ucontext          ibucontext;
	struct mthca_uar            uar;
	struct mthca_user_db_table *db_tab;
	int                         reg_mr_warned;
};

struct mthca_mtt;

struct mthca_mr {
	struct ib_mr      ibmr;
	struct ib_umem   *umem;
	struct mthca_mtt *mtt;
};

struct mthca_pd {
	struct ib_pd    ibpd;
	u32             pd_num;
	atomic_t        sqp_count;
	struct mthca_mr ntmr;
	int             privileged;
};

struct mthca_eq {
	struct mthca_dev      *dev;
	int                    eqn;
	u32                    eqn_mask;
	u32                    cons_index;
	u16                    msi_x_vector;
	u16                    msi_x_entry;
	int                    have_irq;
	int                    nent;
	struct mthca_buf_list *page_list;
	struct mthca_mr        mr;
	char                   irq_name[IB_DEVICE_NAME_MAX];
};

struct mthca_av;

enum mthca_ah_type {
	MTHCA_AH_ON_HCA,
	MTHCA_AH_PCI_POOL,
	MTHCA_AH_KMALLOC
};

struct mthca_ah {
	struct ib_ah       ibah;
	enum mthca_ah_type type;
	u32                key;
	struct mthca_av   *av;
	dma_addr_t         avdma;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the cq with the lower cqn locked first.
 * No other nesting should be done.
 *
 * Each struct mthca_cq/qp also has a ref count, protected by the
 * corresponding table lock.  The pointer from the cq/qp_table to the
 * struct counts as one reference.  This reference also is good for
 * access through the consumer API, so modifying the CQ/QP etc. doesn't
 * need to take another reference.  Access to a QP because of a
 * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 * (an illustrative sketch of this sequence follows this comment)
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table
 * - remove pointer and decrement ref count
 * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when a
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.  Similarly, the consumer is responsible
 * for ensuring that no CQ resize operations are pending when a CQ
 * is destroyed.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
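
/*
 * An illustrative sketch of the completion-event access pattern
 * described above, using struct mthca_cq.  This is an assumption for
 * illustration only: the real lookup goes through the driver's
 * cq_table (see mthca_cq.c), and the table type, mthca_array_get(),
 * and the function name below stand in for those details.
 *
 *	void cq_event_sketch(struct mthca_cq_table *table, u32 cqn)
 *	{
 *		struct mthca_cq *cq;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&table->lock, flags);      // lock table, look up
 *		cq = mthca_array_get(&table->cq, cqn);
 *		if (cq)
 *			++cq->refcount;                      // take a reference
 *		spin_unlock_irqrestore(&table->lock, flags); // drop table lock
 *		if (!cq)
 *			return;
 *
 *		spin_lock(&cq->lock);                        // lock struct, do work
 *		// ... process the event ...
 *		spin_unlock(&cq->lock);
 *
 *		spin_lock_irqsave(&table->lock, flags);      // drop ref; wake waiters
 *		if (!--cq->refcount)
 *			wake_up(&cq->wait);
 *		spin_unlock_irqrestore(&table->lock, flags);
 *	}
 */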

struct mthca_cq_buf {
	union mthca_buf		queue;
	struct mthca_mr		mr;
	int			is_direct;
};

struct mthca_cq_resize {
	struct mthca_cq_buf	buf;
	int			cqe;
	enum {
		CQ_RESIZE_ALLOC,
		CQ_RESIZE_READY,
		CQ_RESIZE_SWAPPED
	}			state;
};

struct mthca_cq {
	struct ib_cq		ibcq;
	spinlock_t		lock;
	int			refcount;
	int			cqn;
	u32			cons_index;
	struct mthca_cq_buf	buf;
	struct mthca_cq_resize *resize_buf;
	int			is_kernel;

	/* Next fields are Arbel only */
	int			set_ci_db_index;
	__be32		       *set_ci_db;
	int			arm_db_index;
	__be32		       *arm_db;
	int			arm_sn;

	wait_queue_head_t	wait;
	struct mutex		mutex;
};

struct mthca_srq {
	struct ib_srq		ibsrq;
	spinlock_t		lock;
	int			refcount;
	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;
	int			first_free;
	int			last_free;
	u16			counter;  /* Arbel only */
	int			db_index; /* Arbel only */
	__be32		       *db;       /* Arbel only */
	void		       *last;

	int			is_direct;
	u64		       *wrid;
	union mthca_buf		queue;
	struct mthca_mr		mr;

	wait_queue_head_t	wait;
	struct mutex		mutex;
};

struct mthca_wq {
	spinlock_t lock;
	int        max;
	unsigned   next_ind;
	unsigned   last_comp;
	unsigned   head;
	unsigned   tail;
	void      *last;
	int        max_gs;
	int        wqe_shift;

	int        db_index; /* Arbel only */
	__be32    *db;
};

struct mthca_sqp {
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	int			header_buf_size;
	void		       *header_buf;
	dma_addr_t		header_dma;
};

struct mthca_qp {
	struct ib_qp	       ibqp;
	int		       refcount;
	u32		       qpn;
	int		       is_direct;
	u8		       port;	 /* for SQP and memfree use only */
	u8		       alt_port; /* for memfree use only */
	u8		       transport;
	u8		       state;
	u8		       atomic_rd_en;
	u8		       resp_depth;

	struct mthca_mr	       mr;

	struct mthca_wq	       rq;
	struct mthca_wq	       sq;
	enum ib_sig_type       sq_policy;
	int		       send_wqe_offset;
	int		       max_inline_data;

	u64		      *wrid;
	union mthca_buf	       queue;

	wait_queue_head_t      wait;
	struct mutex	       mutex;
	struct mthca_sqp      *sqp;
};

static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mthca_srq, ibsrq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}
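
/*
 * Typical use of the accessors above, sketched for illustration: a
 * verbs entry point receives the generic ib_* object and converts it
 * to the driver's private struct.  The function below is a
 * hypothetical example, not a declaration from this driver:
 *
 *	static int example_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 *	{
 *		struct mthca_cq *cq = to_mcq(ibcq);
 *
 *		// cq->cqn, cq->lock, etc. are now accessible
 *		return 0;
 *	}
 */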

#endif /* MTHCA_PROVIDER_H */