/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,

	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,

	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;    /* Reserved on Tavor */
	u8     sq_size_stride;    /* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;    /* Next send WQE on Tavor */
	__be32 snd_db_index;      /* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;    /* Next recv WQE on Tavor */
	__be32 rcv_db_index;      /* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;    /* reserved on Tavor */
	__be16 sq_wqe_counter;    /* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
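
/*
 * Return the address of WQE number n in a work queue.  For a direct
 * (physically contiguous) buffer this is a plain offset into the
 * buffer; otherwise the byte offset is split into a page_list index
 * and an offset within that page.
 */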
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}
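
/*
 * QP lifetime is covered by a simple reference count: the async event
 * handler looks a QP up and bumps qp->refcount under qp_table.lock,
 * and mthca_free_qp() drops its own reference and then waits on
 * qp->wait until the count reaches zero.
 */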
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
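
/*
 * Translate the IB access flags that apply to a QP's responder side
 * into the RRE/RAE/RWE bits of the QP context.  When the destination
 * accepts no RDMA read/atomic responder resources, only remote writes
 * are left enabled, since reads and atomics need responder resources.
 */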
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *path);
	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0x7,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
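
/*
 * mthca_query_qp() reads the QP context back from the HCA with a
 * QUERY_QP firmware command and converts the returned fields into the
 * generic ib_qp_attr layout used by the verbs midlayer.
 */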
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out;
	if (status) {
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp_attr->qp_state            = to_ib_qp_state(mthca_state);
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->path_mtu            = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state      =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey                = be32_to_cpu(context->qkey);
	qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
	}

	qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining    = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic  = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);

	qp_attr->min_rnr_timer  =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->port_num       = qp_attr->ah_attr.port_num;
	qp_attr->timeout        = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt      = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry      = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
	qp_attr->alt_timeout    = context->alt_path.ackto >> 3;
	qp_init_attr->cap       = qp_attr->cap;

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}

static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len - 1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}
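
/*
 * mthca_modify_qp() validates the requested attributes against the
 * device limits, builds a complete QP context plus an optional
 * parameter mask in a firmware mailbox, and issues a single MODIFY_QP
 * command for the state transition.  On success the cached software
 * state (qp->state, atomic_rd_en, resp_depth, port) is updated to
 * match, and QP0 transitions also bring the IB link up or down.
 */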
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) {
			err = -EINVAL;
			goto out;
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len - 1);
			err = -EINVAL;
			goto out;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			err = -EINVAL;
			goto out;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num)) {
			err = -EINVAL;
			goto out;
		}

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event, &status);
	if (err)
		goto out;
	if (status) {
		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
			   cur_state, new_state, status);
		err = -EINVAL;
		goto out;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_init(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof(struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof(struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof(struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof(struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof(struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof(struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof(struct mthca_next_seg)) /
			      sizeof(struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof(struct mthca_next_seg) +
		qp->rq.max_gs * sizeof(struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof(struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof(struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof(struct mthca_arbel_ud_seg) :
			sizeof(struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof(struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof(struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof(struct mthca_atomic_seg) +
			     sizeof(struct mthca_raddr_seg) +
			     sizeof(struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof(struct mthca_bind_seg));

	size += sizeof(struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof(u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
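
/*
 * Mem-free (Arbel-mode) HCAs keep a doorbell record for each work
 * queue in host memory, so a receive and a send doorbell record are
 * allocated here for kernel QPs; Tavor-mode devices need neither.
 */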
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
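
/*
 * Common QP setup: initialize the software work queue state, map the
 * QP's context tables (mem-free only), allocate the WQE buffer, and,
 * for kernel QPs on mem-free HCAs, pre-link every receive and send
 * WQE to its successor via the nda_op field so the hardware can
 * follow the chain.
 */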
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof(struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof(struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
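
/*
 * Work queue sizes are validated against the device limits here.
 * Mem-free HCAs require power-of-two queue depths (hence the
 * roundup_pow_of_two()), while Tavor-mode devices use the requested
 * sizes as-is.
 */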
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof(struct mthca_data_seg));

	return 0;
}
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
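
/*
 * Special QPs (QP0/QP1) live in a reserved range of four QPNs starting
 * at qp_table.sqp_start: QP0 for ports 1 and 2, then QP1 for ports 1
 * and 2.  The UD headers they transmit are built in a DMA-coherent
 * buffer allocated here, one MTHCA_UD_HEADER_SIZE slot per send WQE.
 */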
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr = cpu_to_be64(sqp->header_dma +
				 ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
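
/*
 * Check whether posting nreq more work requests would overflow the
 * work queue.  The head/tail snapshot is first checked locklessly; if
 * the queue looks full, the check is repeated under the CQ lock,
 * since the tail is advanced by completion processing running under
 * that lock.
 */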
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
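
/*
 * Post a list of send work requests on a Tavor-mode HCA.  Each WQE is
 * written into the send queue buffer and linked into the previous
 * WQE's "next" segment; the whole batch is then made visible to the
 * hardware by ringing the send doorbell with the first WQE's offset,
 * opcode and size.
 */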
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail, "
					"%d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof(struct mthca_next_seg);
		size = sizeof(struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof(struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof(struct mthca_atomic_seg);
				size += (sizeof(struct mthca_raddr_seg) +
					 sizeof(struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof(struct mthca_raddr_seg);
				size += sizeof(struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof(struct mthca_raddr_seg);
				size += sizeof(struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;
case UD :
( ( struct mthca_tavor_ud_seg * ) wqe ) - > lkey =
cpu_to_be32 ( to_mah ( wr - > wr . ud . ah ) - > key ) ;
( ( struct mthca_tavor_ud_seg * ) wqe ) - > av_addr =
cpu_to_be64 ( to_mah ( wr - > wr . ud . ah ) - > avdma ) ;
( ( struct mthca_tavor_ud_seg * ) wqe ) - > dqpn =
cpu_to_be32 ( wr - > wr . ud . remote_qpn ) ;
( ( struct mthca_tavor_ud_seg * ) wqe ) - > qkey =
cpu_to_be32 ( wr - > wr . ud . remote_qkey ) ;
wqe + = sizeof ( struct mthca_tavor_ud_seg ) ;
size + = sizeof ( struct mthca_tavor_ud_seg ) / 16 ;
break ;
case MLX :
err = build_mlx_header ( dev , to_msqp ( qp ) , ind , wr ,
wqe - sizeof ( struct mthca_next_seg ) ,
wqe ) ;
if ( err ) {
* bad_wr = wr ;
goto out ;
}
wqe + = sizeof ( struct mthca_data_seg ) ;
size + = sizeof ( struct mthca_data_seg ) / 16 ;
break ;
}
if ( wr - > num_sge > qp - > sq . max_gs ) {
mthca_err ( dev , " too many gathers \n " ) ;
err = - EINVAL ;
* bad_wr = wr ;
goto out ;
}
for ( i = 0 ; i < wr - > num_sge ; + + i ) {
( ( struct mthca_data_seg * ) wqe ) - > byte_count =
cpu_to_be32 ( wr - > sg_list [ i ] . length ) ;
( ( struct mthca_data_seg * ) wqe ) - > lkey =
cpu_to_be32 ( wr - > sg_list [ i ] . lkey ) ;
( ( struct mthca_data_seg * ) wqe ) - > addr =
cpu_to_be64 ( wr - > sg_list [ i ] . addr ) ;
wqe + = sizeof ( struct mthca_data_seg ) ;
size + = sizeof ( struct mthca_data_seg ) / 16 ;
}
/* Add one more inline data segment for ICRC */
if ( qp - > transport = = MLX ) {
( ( struct mthca_data_seg * ) wqe ) - > byte_count =
cpu_to_be32 ( ( 1 < < 31 ) | 4 ) ;
( ( u32 * ) wqe ) [ 1 ] = 0 ;
wqe + = sizeof ( struct mthca_data_seg ) ;
size + = sizeof ( struct mthca_data_seg ) / 16 ;
}
qp - > wrid [ ind + qp - > rq . max ] = wr - > wr_id ;
if ( wr - > opcode > = ARRAY_SIZE ( mthca_opcode ) ) {
mthca_err ( dev , " opcode invalid \n " ) ;
err = - EINVAL ;
* bad_wr = wr ;
goto out ;
}
2005-09-13 10:41:03 -07:00
( ( struct mthca_next_seg * ) prev_wqe ) - > nda_op =
cpu_to_be32 ( ( ( ind < < qp - > sq . wqe_shift ) +
qp - > send_wqe_offset ) |
mthca_opcode [ wr - > opcode ] ) ;
wmb ( ) ;
( ( struct mthca_next_seg * ) prev_wqe ) - > ee_nds =
2006-02-27 21:02:00 -08:00
cpu_to_be32 ( ( size0 ? 0 : MTHCA_NEXT_DBD ) | size |
( ( wr - > send_flags & IB_SEND_FENCE ) ?
MTHCA_NEXT_FENCE : 0 ) ) ;
2005-04-16 15:20:36 -07:00
if ( ! size0 ) {
size0 = size ;
op0 = mthca_opcode [ wr - > opcode ] ;
}
+ + ind ;
if ( unlikely ( ind > = qp - > sq . max ) )
ind - = qp - > sq . max ;
}
out :
if ( likely ( nreq ) ) {
2005-08-13 21:05:57 -07:00
__be32 doorbell [ 2 ] ;
2005-04-16 15:20:36 -07:00
doorbell [ 0 ] = cpu_to_be32 ( ( ( qp - > sq . next_ind < < qp - > sq . wqe_shift ) +
qp - > send_wqe_offset ) | f0 | op0 ) ;
doorbell [ 1 ] = cpu_to_be32 ( ( qp - > qpn < < 8 ) | size0 ) ;
wmb ( ) ;
mthca_write64 ( doorbell ,
dev - > kar + MTHCA_SEND_DOORBELL ,
MTHCA_GET_DOORBELL_LOCK ( & dev - > doorbell_lock ) ) ;
}
qp - > sq . next_ind = ind ;
qp - > sq . head + = nreq ;
spin_unlock_irqrestore ( & qp - > sq . lock , flags ) ;
return err ;
}
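/*
 * Post a chain of receive work requests to a Tavor-mode QP.  Each new
 * WQE is linked into the previous descriptor, and the receive doorbell
 * is rung once for the whole batch; because the doorbell carries only
 * an 8-bit WQE count, an intermediate doorbell is rung every
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests.
 */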
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
			doorbell[1] = cpu_to_be32(qp->qpn << 8);

			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
			size0 = 0;
		}

		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
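/*
 * Post a chain of send work requests to an Arbel (mem-free mode) QP.
 * WQEs are chained to the previous descriptor; the doorbell record in
 * memory is updated before the MMIO send doorbell is written, and an
 * intermediate doorbell is rung every MTHCA_ARBEL_MAX_WQES_PER_SEND_DB
 * requests.
 */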
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
						  ((qp->sq.head & 0xffff) << 8) |
						  f0 | op0);
			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
			size0 = 0;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();
			mthca_write64(doorbell,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24)                  |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
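/*
 * Post a chain of receive work requests to an Arbel (mem-free mode)
 * QP.  Unlike the Tavor path, no MMIO doorbell is written here; it is
 * enough to update the receive doorbell record (*qp->rq.db) once the
 * WQEs themselves are visible in memory.
 */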
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}
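		/*
		 * If fewer scatter entries than rq.max_gs were given,
		 * terminate the list with a zero-length entry using the
		 * invalid lkey, marking the end of the valid scatter
		 * list for the HCA.
		 */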
		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
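/*
 * Called for a WQE that completed in error: report via *dbd whether
 * the WQE had the MTHCA_NEXT_DBD bit set, and compute the address/size
 * word of the WQE it chains to (or 0 if none), so the completion
 * handler can patch up the descriptor chain.
 */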
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}
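/*
 * Driver-init setup of the QP table: reserve QP numbers for the
 * special (SMI/GSI) QPs, initialize the QPN allocator and lookup
 * array, and configure the special QPs in the HCA via
 * CONF_SPECIAL_QP.
 */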
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
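/*
 * Tear down the QP table: unconfigure the special QPs and free the
 * lookup array and QPN allocator set up by mthca_init_qp_table().
 */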
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}