/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "qp.h"
/**
* ud_loopback - handle send on loopback QPs
* @ sqp : the sending QP
* @ swqe : the send work request
*
* This is called from hfi1_make_ud_req ( ) to forward a WQE addressed
* to the same HFI .
* Note that the receive interrupt handler may be calling hfi1_ud_rcv ( )
* while this is being called .
*/
2016-01-20 01:42:28 +03:00
static void ud_loopback ( struct rvt_qp * sqp , struct rvt_swqe * swqe )
2015-07-30 22:17:43 +03:00
{
struct hfi1_ibport * ibp = to_iport ( sqp - > ibqp . device , sqp - > port_num ) ;
struct hfi1_pportdata * ppd ;
2016-01-20 01:42:28 +03:00
struct rvt_qp * qp ;
2017-04-29 21:41:18 +03:00
struct rdma_ah_attr * ah_attr ;
2015-07-30 22:17:43 +03:00
unsigned long flags ;
2016-01-20 01:42:28 +03:00
struct rvt_sge_state ssge ;
struct rvt_sge * sge ;
2015-07-30 22:17:43 +03:00
struct ib_wc wc ;
u32 length ;
enum ib_qp_type sqptype , dqptype ;
rcu_read_lock ( ) ;
2016-01-20 01:43:44 +03:00
qp = rvt_lookup_qpn ( ib_to_rvt ( sqp - > ibqp . device ) , & ibp - > rvp ,
swqe - > ud_wr . remote_qpn ) ;
2015-07-30 22:17:43 +03:00
if ( ! qp ) {
2016-01-20 01:42:39 +03:00
ibp - > rvp . n_pkt_drops + + ;
2015-07-30 22:17:43 +03:00
rcu_read_unlock ( ) ;
return ;
}
sqptype = sqp - > ibqp . qp_type = = IB_QPT_GSI ?
IB_QPT_UD : sqp - > ibqp . qp_type ;
dqptype = qp - > ibqp . qp_type = = IB_QPT_GSI ?
IB_QPT_UD : qp - > ibqp . qp_type ;
if ( dqptype ! = sqptype | |
2016-01-20 01:43:33 +03:00
! ( ib_rvt_state_ops [ qp - > state ] & RVT_PROCESS_RECV_OK ) ) {
2016-01-20 01:42:39 +03:00
ibp - > rvp . n_pkt_drops + + ;
2015-07-30 22:17:43 +03:00
goto drop ;
}
2016-01-20 01:42:17 +03:00
ah_attr = & ibah_to_rvtah ( swqe - > ud_wr . ah ) - > attr ;
2015-07-30 22:17:43 +03:00
ppd = ppd_from_ibp ( ibp ) ;
if ( qp - > ibqp . qp_num > 1 ) {
u16 pkey ;
u16 slid ;
2017-04-29 21:41:28 +03:00
u8 sc5 = ibp - > sl_to_sc [ rdma_ah_get_sl ( ah_attr ) ] ;
2015-07-30 22:17:43 +03:00
pkey = hfi1_get_pkey ( ibp , sqp - > s_pkey_index ) ;
2017-04-29 21:41:28 +03:00
slid = ppd - > lid | ( rdma_ah_get_path_bits ( ah_attr ) &
2015-07-30 22:17:43 +03:00
( ( 1 < < ppd - > lmc ) - 1 ) ) ;
if ( unlikely ( ingress_pkey_check ( ppd , pkey , sc5 ,
qp - > s_pkey_index , slid ) ) ) {
2015-12-10 17:59:40 +03:00
hfi1_bad_pqkey ( ibp , OPA_TRAP_BAD_P_KEY , pkey ,
2017-04-29 21:41:28 +03:00
rdma_ah_get_sl ( ah_attr ) ,
2015-07-30 22:17:43 +03:00
sqp - > ibqp . qp_num , qp - > ibqp . qp_num ,
2017-04-29 21:41:28 +03:00
slid , rdma_ah_get_dlid ( ah_attr ) ) ;
2015-07-30 22:17:43 +03:00
goto drop ;
}
}
/*
* Check that the qkey matches ( except for QP0 , see 9.6 .1 .4 .1 ) .
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR ( see 10.2 .5 ) .
*/
if ( qp - > ibqp . qp_num ) {
u32 qkey ;
2015-10-08 11:16:33 +03:00
qkey = ( int ) swqe - > ud_wr . remote_qkey < 0 ?
sqp - > qkey : swqe - > ud_wr . remote_qkey ;
2015-07-30 22:17:43 +03:00
if ( unlikely ( qkey ! = qp - > qkey ) ) {
u16 lid ;
2017-04-29 21:41:28 +03:00
lid = ppd - > lid | ( rdma_ah_get_path_bits ( ah_attr ) &
2015-07-30 22:17:43 +03:00
( ( 1 < < ppd - > lmc ) - 1 ) ) ;
2015-12-10 17:59:40 +03:00
hfi1_bad_pqkey ( ibp , OPA_TRAP_BAD_Q_KEY , qkey ,
2017-04-29 21:41:28 +03:00
rdma_ah_get_sl ( ah_attr ) ,
2015-07-30 22:17:43 +03:00
sqp - > ibqp . qp_num , qp - > ibqp . qp_num ,
2015-12-10 17:59:40 +03:00
lid ,
2017-04-29 21:41:28 +03:00
rdma_ah_get_dlid ( ah_attr ) ) ;
2015-07-30 22:17:43 +03:00
goto drop ;
}
}
/*
* A GRH is expected to precede the data even if not
* present on the wire .
*/
length = swqe - > length ;
memset ( & wc , 0 , sizeof ( wc ) ) ;
wc . byte_len = length + sizeof ( struct ib_grh ) ;
if ( swqe - > wr . opcode = = IB_WR_SEND_WITH_IMM ) {
wc . wc_flags = IB_WC_WITH_IMM ;
wc . ex . imm_data = swqe - > wr . ex . imm_data ;
}
spin_lock_irqsave ( & qp - > r_lock , flags ) ;
/*
* Get the next work request entry to find where to put the data .
*/
2016-02-15 07:22:00 +03:00
if ( qp - > r_flags & RVT_R_REUSE_SGE ) {
2016-01-20 01:43:01 +03:00
qp - > r_flags & = ~ RVT_R_REUSE_SGE ;
2016-02-15 07:22:00 +03:00
} else {
2015-07-30 22:17:43 +03:00
int ret ;
2016-01-20 01:43:44 +03:00
ret = hfi1_rvt_get_rwqe ( qp , 0 ) ;
2015-07-30 22:17:43 +03:00
if ( ret < 0 ) {
2017-02-08 16:27:01 +03:00
rvt_rc_error ( qp , IB_WC_LOC_QP_OP_ERR ) ;
2015-07-30 22:17:43 +03:00
goto bail_unlock ;
}
if ( ! ret ) {
if ( qp - > ibqp . qp_num = = 0 )
2016-01-20 01:42:39 +03:00
ibp - > rvp . n_vl15_dropped + + ;
2015-07-30 22:17:43 +03:00
goto bail_unlock ;
}
}
/* Silently drop packets which are too big. */
if ( unlikely ( wc . byte_len > qp - > r_len ) ) {
2016-01-20 01:43:01 +03:00
qp - > r_flags | = RVT_R_REUSE_SGE ;
2016-01-20 01:42:39 +03:00
ibp - > rvp . n_pkt_drops + + ;
2015-07-30 22:17:43 +03:00
goto bail_unlock ;
}
2017-04-29 21:41:28 +03:00
if ( rdma_ah_get_ah_flags ( ah_attr ) & IB_AH_GRH ) {
2016-07-25 23:40:40 +03:00
struct ib_grh grh ;
2017-04-29 21:41:28 +03:00
const struct ib_global_route * grd = rdma_ah_read_grh ( ah_attr ) ;
2016-07-25 23:40:40 +03:00
2017-04-29 21:41:28 +03:00
hfi1_make_grh ( ibp , & grh , grd , 0 , 0 ) ;
2016-07-25 23:40:40 +03:00
hfi1_copy_sge ( & qp - > r_sge , & grh ,
2017-02-08 16:27:31 +03:00
sizeof ( grh ) , true , false ) ;
2015-07-30 22:17:43 +03:00
wc . wc_flags | = IB_WC_GRH ;
2016-02-15 07:22:00 +03:00
} else {
2017-02-08 16:27:37 +03:00
rvt_skip_sge ( & qp - > r_sge , sizeof ( struct ib_grh ) , true ) ;
2016-02-15 07:22:00 +03:00
}
2015-07-30 22:17:43 +03:00
ssge . sg_list = swqe - > sg_list + 1 ;
ssge . sge = * swqe - > sg_list ;
ssge . num_sge = swqe - > wr . num_sge ;
sge = & ssge . sge ;
while ( length ) {
u32 len = sge - > length ;
if ( len > length )
len = length ;
if ( len > sge - > sge_length )
len = sge - > sge_length ;
WARN_ON_ONCE ( len = = 0 ) ;
2017-02-08 16:27:31 +03:00
hfi1_copy_sge ( & qp - > r_sge , sge - > vaddr , len , true , false ) ;
2015-07-30 22:17:43 +03:00
sge - > vaddr + = len ;
sge - > length - = len ;
sge - > sge_length - = len ;
if ( sge - > sge_length = = 0 ) {
if ( - - ssge . num_sge )
* sge = * ssge . sg_list + + ;
} else if ( sge - > length = = 0 & & sge - > mr - > lkey ) {
2016-01-20 01:41:55 +03:00
if ( + + sge - > n > = RVT_SEGSZ ) {
2015-07-30 22:17:43 +03:00
if ( + + sge - > m > = sge - > mr - > mapsz )
break ;
sge - > n = 0 ;
}
sge - > vaddr =
sge - > mr - > map [ sge - > m ] - > segs [ sge - > n ] . vaddr ;
sge - > length =
sge - > mr - > map [ sge - > m ] - > segs [ sge - > n ] . length ;
}
length - = len ;
}
2016-01-20 01:43:44 +03:00
rvt_put_ss ( & qp - > r_sge ) ;
2016-01-20 01:43:01 +03:00
if ( ! test_and_clear_bit ( RVT_R_WRID_VALID , & qp - > r_aflags ) )
2015-07-30 22:17:43 +03:00
goto bail_unlock ;
wc . wr_id = qp - > r_wr_id ;
wc . status = IB_WC_SUCCESS ;
wc . opcode = IB_WC_RECV ;
wc . qp = & qp - > ibqp ;
wc . src_qp = sqp - > ibqp . qp_num ;
if ( qp - > ibqp . qp_type = = IB_QPT_GSI | | qp - > ibqp . qp_type = = IB_QPT_SMI ) {
if ( sqp - > ibqp . qp_type = = IB_QPT_GSI | |
sqp - > ibqp . qp_type = = IB_QPT_SMI )
2015-10-08 11:16:33 +03:00
wc . pkey_index = swqe - > ud_wr . pkey_index ;
2015-07-30 22:17:43 +03:00
else
wc . pkey_index = sqp - > s_pkey_index ;
} else {
wc . pkey_index = 0 ;
}
2017-04-29 21:41:28 +03:00
wc . slid = ppd - > lid | ( rdma_ah_get_path_bits ( ah_attr ) &
( ( 1 < < ppd - > lmc ) - 1 ) ) ;
2015-07-30 22:17:43 +03:00
/* Check for loopback when the port lid is not set */
if ( wc . slid = = 0 & & sqp - > ibqp . qp_type = = IB_QPT_GSI )
2016-01-20 01:42:11 +03:00
wc . slid = be16_to_cpu ( IB_LID_PERMISSIVE ) ;
2017-04-29 21:41:28 +03:00
wc . sl = rdma_ah_get_sl ( ah_attr ) ;
wc . dlid_path_bits = rdma_ah_get_dlid ( ah_attr ) & ( ( 1 < < ppd - > lmc ) - 1 ) ;
2015-07-30 22:17:43 +03:00
wc . port_num = qp - > port_num ;
/* Signal completion event if the solicited bit is set. */
2016-01-20 01:43:22 +03:00
rvt_cq_enter ( ibcq_to_rvtcq ( qp - > ibqp . recv_cq ) , & wc ,
swqe - > wr . send_flags & IB_SEND_SOLICITED ) ;
2016-01-20 01:42:39 +03:00
ibp - > rvp . n_loop_pkts + + ;
2015-07-30 22:17:43 +03:00
bail_unlock :
spin_unlock_irqrestore ( & qp - > r_lock , flags ) ;
drop :
rcu_read_unlock ( ) ;
}
/**
* hfi1_make_ud_req - construct a UD request packet
* @ qp : the QP
*
2016-02-14 23:10:04 +03:00
* Assume s_lock is held .
*
2015-07-30 22:17:43 +03:00
* Return 1 if constructed ; otherwise , return 0.
*/
2016-02-14 23:44:43 +03:00
int hfi1_make_ud_req ( struct rvt_qp * qp , struct hfi1_pkt_state * ps )
2015-07-30 22:17:43 +03:00
{
2016-01-20 01:42:00 +03:00
struct hfi1_qp_priv * priv = qp - > priv ;
2016-09-06 14:35:05 +03:00
struct ib_other_headers * ohdr ;
2017-04-29 21:41:18 +03:00
struct rdma_ah_attr * ah_attr ;
2015-07-30 22:17:43 +03:00
struct hfi1_pportdata * ppd ;
struct hfi1_ibport * ibp ;
2016-01-20 01:42:28 +03:00
struct rvt_swqe * wqe ;
2015-07-30 22:17:43 +03:00
u32 nwords ;
u32 extra_bytes ;
u32 bth0 ;
u16 lrh0 ;
u16 lid ;
int next_cur ;
u8 sc5 ;
2016-02-14 23:44:43 +03:00
ps - > s_txreq = get_txreq ( ps - > dev , qp ) ;
if ( IS_ERR ( ps - > s_txreq ) )
goto bail_no_tx ;
2016-01-20 01:43:33 +03:00
if ( ! ( ib_rvt_state_ops [ qp - > state ] & RVT_PROCESS_NEXT_SEND_OK ) ) {
if ( ! ( ib_rvt_state_ops [ qp - > state ] & RVT_FLUSH_SEND ) )
2015-07-30 22:17:43 +03:00
goto bail ;
/* We are in the error state, flush the work request. */
2016-02-14 23:10:04 +03:00
smp_read_barrier_depends ( ) ; /* see post_one_send */
if ( qp - > s_last = = ACCESS_ONCE ( qp - > s_head ) )
2015-07-30 22:17:43 +03:00
goto bail ;
/* If DMAs are in progress, we can't flush immediately. */
2016-02-14 23:45:36 +03:00
if ( iowait_sdma_pending ( & priv - > s_iowait ) ) {
2016-01-20 01:43:01 +03:00
qp - > s_flags | = RVT_S_WAIT_DMA ;
2015-07-30 22:17:43 +03:00
goto bail ;
}
2016-01-20 01:43:33 +03:00
wqe = rvt_get_swqe_ptr ( qp , qp - > s_last ) ;
2015-07-30 22:17:43 +03:00
hfi1_send_complete ( qp , wqe , IB_WC_WR_FLUSH_ERR ) ;
2016-02-14 23:44:43 +03:00
goto done_free_tx ;
2015-07-30 22:17:43 +03:00
}
2016-02-14 23:10:04 +03:00
/* see post_one_send() */
smp_read_barrier_depends ( ) ;
if ( qp - > s_cur = = ACCESS_ONCE ( qp - > s_head ) )
2015-07-30 22:17:43 +03:00
goto bail ;
2016-01-20 01:43:33 +03:00
wqe = rvt_get_swqe_ptr ( qp , qp - > s_cur ) ;
2015-07-30 22:17:43 +03:00
next_cur = qp - > s_cur + 1 ;
if ( next_cur > = qp - > s_size )
next_cur = 0 ;
/* Construct the header. */
ibp = to_iport ( qp - > ibqp . device , qp - > port_num ) ;
ppd = ppd_from_ibp ( ibp ) ;
2016-01-20 01:42:17 +03:00
ah_attr = & ibah_to_rvtah ( wqe - > ud_wr . ah ) - > attr ;
2017-04-29 21:41:28 +03:00
if ( rdma_ah_get_dlid ( ah_attr ) < be16_to_cpu ( IB_MULTICAST_LID_BASE ) | |
rdma_ah_get_dlid ( ah_attr ) = = be16_to_cpu ( IB_LID_PERMISSIVE ) ) {
lid = rdma_ah_get_dlid ( ah_attr ) & ~ ( ( 1 < < ppd - > lmc ) - 1 ) ;
2016-02-15 07:21:52 +03:00
if ( unlikely ( ! loopback & &
( lid = = ppd - > lid | |
( lid = = be16_to_cpu ( IB_LID_PERMISSIVE ) & &
qp - > ibqp . qp_type = = IB_QPT_GSI ) ) ) ) {
2016-04-12 20:46:10 +03:00
unsigned long tflags = ps - > flags ;
2015-07-30 22:17:43 +03:00
/*
* If DMAs are in progress , we can ' t generate
* a completion for the loopback packet since
* it would be out of order .
* Instead of waiting , we could queue a
* zero length descriptor so we get a callback .
*/
2016-02-14 23:45:36 +03:00
if ( iowait_sdma_pending ( & priv - > s_iowait ) ) {
2016-01-20 01:43:01 +03:00
qp - > s_flags | = RVT_S_WAIT_DMA ;
2015-07-30 22:17:43 +03:00
goto bail ;
}
qp - > s_cur = next_cur ;
2016-04-12 20:46:10 +03:00
spin_unlock_irqrestore ( & qp - > s_lock , tflags ) ;
2015-07-30 22:17:43 +03:00
ud_loopback ( qp , wqe ) ;
2016-04-12 20:46:10 +03:00
spin_lock_irqsave ( & qp - > s_lock , tflags ) ;
ps - > flags = tflags ;
2015-07-30 22:17:43 +03:00
hfi1_send_complete ( qp , wqe , IB_WC_SUCCESS ) ;
2016-02-14 23:44:43 +03:00
goto done_free_tx ;
2015-07-30 22:17:43 +03:00
}
}
qp - > s_cur = next_cur ;
extra_bytes = - wqe - > length & 3 ;
nwords = ( wqe - > length + extra_bytes ) > > 2 ;
/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
qp - > s_hdrwords = 7 ;
2016-12-08 06:33:00 +03:00
ps - > s_txreq - > s_cur_size = wqe - > length ;
2016-12-08 06:33:27 +03:00
ps - > s_txreq - > ss = & qp - > s_sge ;
2017-04-29 21:41:28 +03:00
qp - > s_srate = rdma_ah_get_static_rate ( ah_attr ) ;
2015-07-30 22:17:43 +03:00
qp - > srate_mbps = ib_rate_to_mbps ( qp - > s_srate ) ;
qp - > s_wqe = wqe ;
qp - > s_sge . sge = wqe - > sg_list [ 0 ] ;
qp - > s_sge . sg_list = wqe - > sg_list + 1 ;
qp - > s_sge . num_sge = wqe - > wr . num_sge ;
qp - > s_sge . total_len = wqe - > length ;
2017-04-29 21:41:28 +03:00
if ( rdma_ah_get_ah_flags ( ah_attr ) & IB_AH_GRH ) {
2015-07-30 22:17:43 +03:00
/* Header size in 32-bit words. */
2016-02-14 23:44:43 +03:00
qp - > s_hdrwords + = hfi1_make_grh ( ibp ,
& ps - > s_txreq - > phdr . hdr . u . l . grh ,
2017-04-29 21:41:28 +03:00
rdma_ah_read_grh ( ah_attr ) ,
2016-02-14 23:44:43 +03:00
qp - > s_hdrwords , nwords ) ;
2015-07-30 22:17:43 +03:00
lrh0 = HFI1_LRH_GRH ;
2016-02-14 23:44:43 +03:00
ohdr = & ps - > s_txreq - > phdr . hdr . u . l . oth ;
2015-07-30 22:17:43 +03:00
/*
* Don ' t worry about sending to locally attached multicast
* QPs . It is unspecified by the spec . what happens .
*/
} else {
/* Header size in 32-bit words. */
lrh0 = HFI1_LRH_BTH ;
2016-02-14 23:44:43 +03:00
ohdr = & ps - > s_txreq - > phdr . hdr . u . oth ;
2015-07-30 22:17:43 +03:00
}
if ( wqe - > wr . opcode = = IB_WR_SEND_WITH_IMM ) {
qp - > s_hdrwords + + ;
ohdr - > u . ud . imm_data = wqe - > wr . ex . imm_data ;
bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE < < 24 ;
2016-02-15 07:22:00 +03:00
} else {
2015-07-30 22:17:43 +03:00
bth0 = IB_OPCODE_UD_SEND_ONLY < < 24 ;
2016-02-15 07:22:00 +03:00
}
2017-04-29 21:41:28 +03:00
sc5 = ibp - > sl_to_sc [ rdma_ah_get_sl ( ah_attr ) ] ;
lrh0 | = ( rdma_ah_get_sl ( ah_attr ) & 0xf ) < < 4 ;
2015-07-30 22:17:43 +03:00
if ( qp - > ibqp . qp_type = = IB_QPT_SMI ) {
lrh0 | = 0xF000 ; /* Set VL (see ch. 13.5.3.1) */
2016-01-20 01:42:00 +03:00
priv - > s_sc = 0xf ;
2015-07-30 22:17:43 +03:00
} else {
lrh0 | = ( sc5 & 0xf ) < < 12 ;
2016-01-20 01:42:00 +03:00
priv - > s_sc = sc5 ;
2015-07-30 22:17:43 +03:00
}
2016-01-20 01:42:00 +03:00
priv - > s_sde = qp_to_sdma_engine ( qp , priv - > s_sc ) ;
2016-02-14 23:45:18 +03:00
ps - > s_txreq - > sde = priv - > s_sde ;
2016-02-14 23:45:00 +03:00
priv - > s_sendcontext = qp_to_send_context ( qp , priv - > s_sc ) ;
2016-02-14 23:45:18 +03:00
ps - > s_txreq - > psc = priv - > s_sendcontext ;
2016-02-14 23:44:43 +03:00
ps - > s_txreq - > phdr . hdr . lrh [ 0 ] = cpu_to_be16 ( lrh0 ) ;
2017-04-29 21:41:28 +03:00
ps - > s_txreq - > phdr . hdr . lrh [ 1 ] =
cpu_to_be16 ( rdma_ah_get_dlid ( ah_attr ) ) ;
2016-02-14 23:44:43 +03:00
ps - > s_txreq - > phdr . hdr . lrh [ 2 ] =
2015-07-30 22:17:43 +03:00
cpu_to_be16 ( qp - > s_hdrwords + nwords + SIZE_OF_CRC ) ;
2017-04-29 21:41:28 +03:00
if ( rdma_ah_get_dlid ( ah_attr ) = = be16_to_cpu ( IB_LID_PERMISSIVE ) ) {
2016-02-14 23:44:43 +03:00
ps - > s_txreq - > phdr . hdr . lrh [ 3 ] = IB_LID_PERMISSIVE ;
} else {
2015-07-30 22:17:43 +03:00
lid = ppd - > lid ;
if ( lid ) {
2017-04-29 21:41:28 +03:00
lid | = rdma_ah_get_path_bits ( ah_attr ) &
( ( 1 < < ppd - > lmc ) - 1 ) ;
2016-02-14 23:44:43 +03:00
ps - > s_txreq - > phdr . hdr . lrh [ 3 ] = cpu_to_be16 ( lid ) ;
} else {
ps - > s_txreq - > phdr . hdr . lrh [ 3 ] = IB_LID_PERMISSIVE ;
}
2015-07-30 22:17:43 +03:00
}
if ( wqe - > wr . send_flags & IB_SEND_SOLICITED )
bth0 | = IB_BTH_SOLICITED ;
bth0 | = extra_bytes < < 20 ;
if ( qp - > ibqp . qp_type = = IB_QPT_GSI | | qp - > ibqp . qp_type = = IB_QPT_SMI )
2015-10-08 11:16:33 +03:00
bth0 | = hfi1_get_pkey ( ibp , wqe - > ud_wr . pkey_index ) ;
2015-07-30 22:17:43 +03:00
else
bth0 | = hfi1_get_pkey ( ibp , qp - > s_pkey_index ) ;
ohdr - > bth [ 0 ] = cpu_to_be32 ( bth0 ) ;
2015-10-08 11:16:33 +03:00
ohdr - > bth [ 1 ] = cpu_to_be32 ( wqe - > ud_wr . remote_qpn ) ;
2016-02-14 23:10:04 +03:00
ohdr - > bth [ 2 ] = cpu_to_be32 ( mask_psn ( wqe - > psn ) ) ;
2015-07-30 22:17:43 +03:00
/*
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR ( see 10.2 .5 ) .
*/
2015-10-08 11:16:33 +03:00
ohdr - > u . ud . deth [ 0 ] = cpu_to_be32 ( ( int ) wqe - > ud_wr . remote_qkey < 0 ?
qp - > qkey : wqe - > ud_wr . remote_qkey ) ;
2015-07-30 22:17:43 +03:00
ohdr - > u . ud . deth [ 1 ] = cpu_to_be32 ( qp - > ibqp . qp_num ) ;
/* disarm any ahg */
2016-07-25 23:40:16 +03:00
priv - > s_ahg - > ahgcount = 0 ;
priv - > s_ahg - > ahgidx = 0 ;
priv - > s_ahg - > tx_flags = 0 ;
2016-02-14 23:45:18 +03:00
/* pbc */
ps - > s_txreq - > hdr_dwords = qp - > s_hdrwords + 2 ;
2015-07-30 22:17:43 +03:00
2016-02-14 23:10:04 +03:00
return 1 ;
2016-02-14 23:44:43 +03:00
done_free_tx :
hfi1_put_txreq ( ps - > s_txreq ) ;
ps - > s_txreq = NULL ;
return 1 ;
2015-07-30 22:17:43 +03:00
bail :
2016-02-14 23:44:43 +03:00
hfi1_put_txreq ( ps - > s_txreq ) ;
bail_no_tx :
ps - > s_txreq = NULL ;
2016-01-20 01:43:01 +03:00
qp - > s_flags & = ~ RVT_S_BUSY ;
2016-02-14 23:44:43 +03:00
qp - > s_hdrwords = 0 ;
return 0 ;
2015-07-30 22:17:43 +03:00
}
/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check.  It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		/*
		 * Keep the "not found" sentinel in a signed int: the old
		 * code stored -1 in an unsigned and relied on an
		 * implementation-defined conversion back to int at return.
		 */
		int lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match...  */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}
2016-01-20 01:42:28 +03:00
void return_cnp ( struct hfi1_ibport * ibp , struct rvt_qp * qp , u32 remote_qpn ,
2015-07-30 22:17:43 +03:00
u32 pkey , u32 slid , u32 dlid , u8 sc5 ,
const struct ib_grh * old_grh )
{
u64 pbc , pbc_flags = 0 ;
u32 bth0 , plen , vl , hwords = 5 ;
u16 lrh0 ;
u8 sl = ibp - > sc_to_sl [ sc5 ] ;
2016-09-06 14:35:05 +03:00
struct ib_header hdr ;
struct ib_other_headers * ohdr ;
2015-07-30 22:17:43 +03:00
struct pio_buf * pbuf ;
struct send_context * ctxt = qp_to_send_context ( qp , sc5 ) ;
struct hfi1_pportdata * ppd = ppd_from_ibp ( ibp ) ;
if ( old_grh ) {
struct ib_grh * grh = & hdr . u . l . grh ;
grh - > version_tclass_flow = old_grh - > version_tclass_flow ;
grh - > paylen = cpu_to_be16 ( ( hwords - 2 + SIZE_OF_CRC ) < < 2 ) ;
grh - > hop_limit = 0xff ;
grh - > sgid = old_grh - > dgid ;
grh - > dgid = old_grh - > sgid ;
ohdr = & hdr . u . l . oth ;
lrh0 = HFI1_LRH_GRH ;
hwords + = sizeof ( struct ib_grh ) / sizeof ( u32 ) ;
} else {
ohdr = & hdr . u . oth ;
lrh0 = HFI1_LRH_BTH ;
}
lrh0 | = ( sc5 & 0xf ) < < 12 | sl < < 4 ;
bth0 = pkey | ( IB_OPCODE_CNP < < 24 ) ;
ohdr - > bth [ 0 ] = cpu_to_be32 ( bth0 ) ;
2017-04-09 20:16:28 +03:00
ohdr - > bth [ 1 ] = cpu_to_be32 ( remote_qpn | ( 1 < < IB_BECN_SHIFT ) ) ;
2015-07-30 22:17:43 +03:00
ohdr - > bth [ 2 ] = 0 ; /* PSN 0 */
hdr . lrh [ 0 ] = cpu_to_be16 ( lrh0 ) ;
hdr . lrh [ 1 ] = cpu_to_be16 ( dlid ) ;
hdr . lrh [ 2 ] = cpu_to_be16 ( hwords + SIZE_OF_CRC ) ;
hdr . lrh [ 3 ] = cpu_to_be16 ( slid ) ;
plen = 2 /* PBC */ + hwords ;
2017-05-12 19:19:55 +03:00
pbc_flags | = ( ib_is_sc5 ( sc5 ) < < PBC_DC_INFO_SHIFT ) ;
2015-07-30 22:17:43 +03:00
vl = sc_to_vlt ( ppd - > dd , sc5 ) ;
pbc = create_pbc ( ppd , pbc_flags , qp - > srate_mbps , vl , plen ) ;
if ( ctxt ) {
pbuf = sc_buffer_alloc ( ctxt , plen , NULL , NULL ) ;
if ( pbuf )
ppd - > dd - > pio_inline_send ( ppd - > dd , pbuf , pbc ,
& hdr , hwords ) ;
}
}
/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *    - accept if the port is running an SM
	 *    - drop MAD if it's an SMA trap
	 *    - pkey == FULL_MGMT_P_KEY =>
	 *        reply with unsupported method
	 *    - pkey != FULL_MGMT_P_KEY =>
	 *        increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		/* SMA responses: skip the pkey check. */
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}
/**
* hfi1_ud_rcv - receive an incoming UD packet
* @ ibp : the port the packet came in on
* @ hdr : the packet header
* @ rcv_flags : flags relevant to rcv processing
* @ data : the packet data
* @ tlen : the packet length
* @ qp : the QP the packet came on
*
* This is called from qp_rcv ( ) to process an incoming UD packet
* for the given QP .
* Called at interrupt level .
*/
void hfi1_ud_rcv ( struct hfi1_packet * packet )
{
2016-09-06 14:35:05 +03:00
struct ib_other_headers * ohdr = packet - > ohdr ;
2015-07-30 22:17:43 +03:00
int opcode ;
u32 hdrsize = packet - > hlen ;
struct ib_wc wc ;
u32 qkey ;
u32 src_qp ;
u16 dlid , pkey ;
int mgmt_pkey_idx = - 1 ;
2017-02-08 16:26:25 +03:00
struct hfi1_ibport * ibp = rcd_to_iport ( packet - > rcd ) ;
2016-07-25 23:40:28 +03:00
struct hfi1_pportdata * ppd = ppd_from_ibp ( ibp ) ;
2016-09-06 14:35:05 +03:00
struct ib_header * hdr = packet - > hdr ;
2015-07-30 22:17:43 +03:00
u32 rcv_flags = packet - > rcv_flags ;
void * data = packet - > ebuf ;
u32 tlen = packet - > tlen ;
2016-01-20 01:42:28 +03:00
struct rvt_qp * qp = packet - > qp ;
2015-07-30 22:17:43 +03:00
bool has_grh = rcv_flags & HFI1_HAS_GRH ;
2017-04-09 20:16:15 +03:00
u8 sc5 = hfi1_9B_get_sc5 ( hdr , packet - > rhf ) ;
2016-07-25 23:40:28 +03:00
u32 bth1 ;
u8 sl_from_sc , sl ;
u16 slid ;
u8 extra_bytes ;
2015-07-30 22:17:43 +03:00
2017-05-12 19:19:55 +03:00
qkey = ib_get_qkey ( ohdr ) ;
src_qp = ib_get_sqpn ( ohdr ) ;
2017-04-09 20:16:22 +03:00
dlid = ib_get_dlid ( hdr ) ;
2016-07-25 23:40:28 +03:00
bth1 = be32_to_cpu ( ohdr - > bth [ 1 ] ) ;
2017-04-09 20:16:22 +03:00
slid = ib_get_slid ( hdr ) ;
pkey = ib_bth_get_pkey ( ohdr ) ;
opcode = ib_bth_get_opcode ( ohdr ) ;
sl = ib_get_sl ( hdr ) ;
extra_bytes = ib_bth_get_pad ( ohdr ) ;
2016-07-25 23:40:28 +03:00
extra_bytes + = ( SIZE_OF_CRC < < 2 ) ;
sl_from_sc = ibp - > sc_to_sl [ sc5 ] ;
2015-07-30 22:17:43 +03:00
2016-07-25 23:38:07 +03:00
process_ecn ( qp , packet , ( opcode ! = IB_OPCODE_CNP ) ) ;
2015-07-30 22:17:43 +03:00
/*
* Get the number of bytes the message was padded by
* and drop incomplete packets .
*/
2016-07-25 23:40:28 +03:00
if ( unlikely ( tlen < ( hdrsize + extra_bytes ) ) )
2015-07-30 22:17:43 +03:00
goto drop ;
2016-07-25 23:40:28 +03:00
tlen - = hdrsize + extra_bytes ;
2015-07-30 22:17:43 +03:00
/*
* Check that the permissive LID is only used on QP0
* and the QKEY matches ( see 9.6 .1 .4 .1 and 9.6 .1 .5 .1 ) .
*/
if ( qp - > ibqp . qp_num ) {
if ( unlikely ( hdr - > lrh [ 1 ] = = IB_LID_PERMISSIVE | |
hdr - > lrh [ 3 ] = = IB_LID_PERMISSIVE ) )
goto drop ;
if ( qp - > ibqp . qp_num > 1 ) {
if ( unlikely ( rcv_pkey_check ( ppd , pkey , sc5 , slid ) ) ) {
/*
* Traps will not be sent for packets dropped
* by the HW . This is fine , as sending trap
* for invalid pkeys is optional according to
* IB spec ( release 1.3 , section 10.9 .4 )
*/
2015-12-10 17:59:40 +03:00
hfi1_bad_pqkey ( ibp , OPA_TRAP_BAD_P_KEY ,
2016-07-25 23:40:28 +03:00
pkey , sl ,
2015-07-30 22:17:43 +03:00
src_qp , qp - > ibqp . qp_num ,
2016-07-25 23:40:28 +03:00
slid , dlid ) ;
2015-07-30 22:17:43 +03:00
return ;
}
} else {
/* GSI packet */
mgmt_pkey_idx = hfi1_lookup_pkey_idx ( ibp , pkey ) ;
if ( mgmt_pkey_idx < 0 )
goto drop ;
}
if ( unlikely ( qkey ! = qp - > qkey ) ) {
2016-07-25 23:40:28 +03:00
hfi1_bad_pqkey ( ibp , OPA_TRAP_BAD_Q_KEY , qkey , sl ,
2015-07-30 22:17:43 +03:00
src_qp , qp - > ibqp . qp_num ,
2016-07-25 23:40:28 +03:00
slid , dlid ) ;
2015-07-30 22:17:43 +03:00
return ;
}
/* Drop invalid MAD packets (see 13.5.3.1). */
if ( unlikely ( qp - > ibqp . qp_num = = 1 & &
2016-07-25 23:40:28 +03:00
( tlen > 2048 | | ( sc5 = = 0xF ) ) ) )
2015-07-30 22:17:43 +03:00
goto drop ;
} else {
/* Received on QP0, and so by definition, this is an SMP */
struct opa_smp * smp = ( struct opa_smp * ) data ;
if ( opa_smp_check ( ibp , pkey , sc5 , qp , slid , smp ) )
goto drop ;
if ( tlen > 2048 )
goto drop ;
if ( ( hdr - > lrh [ 1 ] = = IB_LID_PERMISSIVE | |
hdr - > lrh [ 3 ] = = IB_LID_PERMISSIVE ) & &
smp - > mgmt_class ! = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE )
goto drop ;
/* look up SMI pkey */
mgmt_pkey_idx = hfi1_lookup_pkey_idx ( ibp , pkey ) ;
if ( mgmt_pkey_idx < 0 )
goto drop ;
}
if ( qp - > ibqp . qp_num > 1 & &
opcode = = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE ) {
wc . ex . imm_data = ohdr - > u . ud . imm_data ;
wc . wc_flags = IB_WC_WITH_IMM ;
tlen - = sizeof ( u32 ) ;
} else if ( opcode = = IB_OPCODE_UD_SEND_ONLY ) {
wc . ex . imm_data = 0 ;
wc . wc_flags = 0 ;
2016-02-15 07:22:00 +03:00
} else {
2015-07-30 22:17:43 +03:00
goto drop ;
2016-02-15 07:22:00 +03:00
}
2015-07-30 22:17:43 +03:00
/*
* A GRH is expected to precede the data even if not
* present on the wire .
*/
wc . byte_len = tlen + sizeof ( struct ib_grh ) ;
/*
* Get the next work request entry to find where to put the data .
*/
2016-02-15 07:22:00 +03:00
if ( qp - > r_flags & RVT_R_REUSE_SGE ) {
2016-01-20 01:43:01 +03:00
qp - > r_flags & = ~ RVT_R_REUSE_SGE ;
2016-02-15 07:22:00 +03:00
} else {
2015-07-30 22:17:43 +03:00
int ret ;
2016-01-20 01:43:44 +03:00
ret = hfi1_rvt_get_rwqe ( qp , 0 ) ;
2015-07-30 22:17:43 +03:00
if ( ret < 0 ) {
2017-02-08 16:27:01 +03:00
rvt_rc_error ( qp , IB_WC_LOC_QP_OP_ERR ) ;
2015-07-30 22:17:43 +03:00
return ;
}
if ( ! ret ) {
if ( qp - > ibqp . qp_num = = 0 )
2016-01-20 01:42:39 +03:00
ibp - > rvp . n_vl15_dropped + + ;
2015-07-30 22:17:43 +03:00
return ;
}
}
/* Silently drop packets which are too big. */
if ( unlikely ( wc . byte_len > qp - > r_len ) ) {
2016-01-20 01:43:01 +03:00
qp - > r_flags | = RVT_R_REUSE_SGE ;
2015-07-30 22:17:43 +03:00
goto drop ;
}
if ( has_grh ) {
hfi1_copy_sge ( & qp - > r_sge , & hdr - > u . l . grh ,
2017-02-08 16:27:31 +03:00
sizeof ( struct ib_grh ) , true , false ) ;
2015-07-30 22:17:43 +03:00
wc . wc_flags | = IB_WC_GRH ;
2016-02-15 07:22:00 +03:00
} else {
2017-02-08 16:27:37 +03:00
rvt_skip_sge ( & qp - > r_sge , sizeof ( struct ib_grh ) , true ) ;
2016-02-15 07:22:00 +03:00
}
2016-02-04 01:35:49 +03:00
hfi1_copy_sge ( & qp - > r_sge , data , wc . byte_len - sizeof ( struct ib_grh ) ,
2017-02-08 16:27:31 +03:00
true , false ) ;
2016-01-20 01:43:44 +03:00
rvt_put_ss ( & qp - > r_sge ) ;
2016-01-20 01:43:01 +03:00
if ( ! test_and_clear_bit ( RVT_R_WRID_VALID , & qp - > r_aflags ) )
2015-07-30 22:17:43 +03:00
return ;
wc . wr_id = qp - > r_wr_id ;
wc . status = IB_WC_SUCCESS ;
wc . opcode = IB_WC_RECV ;
wc . vendor_err = 0 ;
wc . qp = & qp - > ibqp ;
wc . src_qp = src_qp ;
if ( qp - > ibqp . qp_type = = IB_QPT_GSI | |
qp - > ibqp . qp_type = = IB_QPT_SMI ) {
if ( mgmt_pkey_idx < 0 ) {
if ( net_ratelimit ( ) ) {
struct hfi1_devdata * dd = ppd - > dd ;
dd_dev_err ( dd , " QP type %d mgmt_pkey_idx < 0 and packet not dropped??? \n " ,
qp - > ibqp . qp_type ) ;
mgmt_pkey_idx = 0 ;
}
}
wc . pkey_index = ( unsigned ) mgmt_pkey_idx ;
2016-02-15 07:22:00 +03:00
} else {
2015-07-30 22:17:43 +03:00
wc . pkey_index = 0 ;
2016-02-15 07:22:00 +03:00
}
2015-07-30 22:17:43 +03:00
2016-07-25 23:40:28 +03:00
wc . slid = slid ;
wc . sl = sl_from_sc ;
2015-07-30 22:17:43 +03:00
/*
* Save the LMC lower bits if the destination LID is a unicast LID .
*/
2016-01-20 01:42:11 +03:00
wc . dlid_path_bits = dlid > = be16_to_cpu ( IB_MULTICAST_LID_BASE ) ? 0 :
2015-07-30 22:17:43 +03:00
dlid & ( ( 1 < < ppd_from_ibp ( ibp ) - > lmc ) - 1 ) ;
wc . port_num = qp - > port_num ;
/* Signal completion event if the solicited bit is set. */
2016-01-20 01:43:22 +03:00
rvt_cq_enter ( ibcq_to_rvtcq ( qp - > ibqp . recv_cq ) , & wc ,
( ohdr - > bth [ 0 ] &
cpu_to_be32 ( IB_BTH_SOLICITED ) ) ! = 0 ) ;
2015-07-30 22:17:43 +03:00
return ;
drop :
2016-01-20 01:42:39 +03:00
ibp - > rvp . n_pkt_drops + + ;
2015-07-30 22:17:43 +03:00
}