/*
 * Copyright (c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD / GPLv2 license . When using or
* redistributing this file , you may do so under either license .
*
* GPL LICENSE SUMMARY
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* General Public License for more details .
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions
* are met :
*
* - Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
* - Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in
* the documentation and / or other materials provided with the
* distribution .
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
* LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL ,
* SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
* LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
* DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
* ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*
*/
# include <linux/net.h>
# include <rdma/ib_smi.h>
# include "hfi.h"
# include "mad.h"
2016-02-14 12:44:43 -08:00
# include "verbs_txreq.h"
2016-02-14 12:45:18 -08:00
# include "qp.h"
2015-07-30 15:17:43 -04:00
/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct rvt_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	/*
	 * The destination QP is found under RCU protection; hold the read
	 * lock until we are completely done with it (the "drop" exit).
	 */
	rcu_read_lock();

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	/* GSI QPs carry UD traffic: treat them as UD for the type match. */
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		/*
		 * Not QP0/QP1: the packet never reaches the hardware, so
		 * the ingress pkey check must be done here in software.
		 */
		u16 pkey;
		u16 slid;
		u8 sc5 = ibp->sl_to_sc[ah_attr->sl];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (ah_attr->src_path_bits &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index, slid))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
				       ah_attr->sl,
				       sqp->ibqp.qp_num, qp->ibqp.qp_num,
				       slid, ah_attr->dlid);
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
				       ah_attr->sl,
				       sqp->ibqp.qp_num, qp->ibqp.qp_num,
				       lid,
				       ah_attr->dlid);
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	/* Receive-side state is protected by r_lock from here on. */
	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			/* No RWQE available; QP0 drops are counted. */
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		/* Keep the SGE so it can be reused for the next packet. */
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (ah_attr->ah_flags & IB_AH_GRH) {
		hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
			      sizeof(struct ib_grh), 1, 0);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	}

	/* Copy the send WQE's payload into the receive SGE list. */
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1, 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			/* Current MR segment exhausted; advance to next. */
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;

	/* Build and post the receive completion. */
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}
/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state (holds the tx request being built)
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int next_cur;
	u8 sc5;

	/* Allocate the tx request first; every exit path must release it. */
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	smp_read_barrier_depends();
	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
	    ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     (lid == ppd->lid ||
			      (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
			       qp->ibqp.qp_type == IB_QPT_GSI)))) {
			unsigned long tflags = ps->flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			/* ud_loopback() must run without s_lock held. */
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}

	qp->s_cur = next_cur;
	/* Pad payload to a 4-byte boundary. */
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += hfi1_make_grh(ibp,
						&ps->s_txreq->phdr.hdr.u.l.grh,
						&ah_attr->grh,
						qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}
	sc5 = ibp->sl_to_sc[ah_attr->sl];
	lrh0 |= (ah_attr->sl & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}
	/* Pick the SDMA engine and send context for this service class. */
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
	ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);
	ps->s_txreq->phdr.hdr.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
	} else {
		lid = ppd->lid;
		if (lid) {
			lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
			ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(lid);
		} else {
			ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
		}
	}
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
	/* disarm any ahg */
	priv->s_hdr->ahgcount = 0;
	priv->s_hdr->ahgidx = 0;
	priv->s_hdr->tx_flags = 0;
	priv->s_hdr->sde = NULL;
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check.  It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		/*
		 * Use a signed sentinel so the "not found" return is a
		 * plain -1 instead of relying on the implementation-defined
		 * conversion of (unsigned)-1 back to int.
		 */
		int lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			/* remember where the limited key lives as fallback */
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match...  */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}
/**
 * return_cnp - build and send a CNP (congestion notification packet)
 * @ibp: the port to send on
 * @qp: the QP the marked packet arrived on (used for rate/send context)
 * @remote_qpn: destination QP number placed in the BTH
 * @pkey: pkey placed in the BTH
 * @slid: value placed in the LRH source LID field
 * @dlid: value placed in the LRH destination LID field
 * @sc5: 5-bit service class of the packet being answered
 * @old_grh: GRH of the original packet, or NULL if it carried none
 *
 * The CNP is assembled on the stack and sent inline through a PIO
 * buffer; if no send context or PIO buffer is available, nothing is
 * sent.
 */
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct hfi1_ib_header hdr;
	struct hfi1_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (old_grh) {
		/* Echo the GRH back with source/destination GIDs swapped. */
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	/* Set the BECN bit so the receiver sees the congestion mark. */
	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << HFI1_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(slid);

	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		/* no PIO buffer available: the CNP is simply not sent */
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}
/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 0.90, section 9.10.26
 * ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 method = smp->method;

	/*
	 * SMPs should always arrive with sc 0xf; reject anything else
	 * even though it should not be possible to get here otherwise.
	 */
	if (sc5 != 0xf)
		return 1;

	/* Standard ingress pkey check against the packet's BTH pkey. */
	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * Having passed the pkey check above, the pkey is guaranteed to
	 * be either LIM_MGMT_P_KEY or FULL_MGMT_P_KEY (see
	 * ingress_pkey_check), so it is not re-validated below.
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, trap, or trap repress.
	 * The response depends, in part, on which type of SMP is being
	 * processed:
	 *
	 * SMA request or trap repress:
	 *	- pkey != FULL_MGMT_P_KEY =>
	 *	    increment port recv constraint errors, drop MAD
	 *
	 * Anything else (SMA response or trap):
	 *	- accept MAD if the port is running an SM
	 *	- pkey == FULL_MGMT_P_KEY =>
	 *	    reply with unsupported method (just mark the smp's
	 *	    status field here, and let it be processed normally)
	 *	- pkey != LIM_MGMT_P_KEY =>
	 *	    increment port recv constraint errors, drop MAD
	 */
	if (method == IB_MGMT_METHOD_GET ||
	    method == IB_MGMT_METHOD_SET ||
	    method == IB_MGMT_METHOD_REPORT ||
	    method == IB_MGMT_METHOD_TRAP_REPRESS) {
		/* SMA request / trap repress: full key is mandatory. */
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		return 0;
	}

	if (method == IB_MGMT_METHOD_SEND ||
	    method == IB_MGMT_METHOD_TRAP ||
	    method == IB_MGMT_METHOD_GET_RESP ||
	    method == IB_MGMT_METHOD_REPORT_RESP) {
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		if (pkey != LIM_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
	}

	/* Any other method is accepted as-is. */
	return 0;
}
/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the received packet (header, rcv flags, payload, length, and
 *          the QP it arrived on)
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	struct hfi1_other_headers *ohdr = packet->ohdr;
	int opcode;
	u32 hdrsize = packet->hlen;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid, pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	bool has_grh = rcv_flags & HFI1_HAS_GRH;
	bool sc4_bit = has_sc4_bit(packet);
	u8 sc;
	u32 bth1;
	int is_mcast;
	struct ib_grh *grh = NULL;

	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
	dlid = be16_to_cpu(hdr->lrh[1]);
	is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
			(dlid != be16_to_cpu(IB_LID_PERMISSIVE));
	bth1 = be32_to_cpu(ohdr->bth[1]);
	if (unlikely(bth1 & HFI1_BECN_SMASK)) {
		/*
		 * In pre-B0 h/w the CNP_OPCODE is handled via an
		 * error path.
		 */
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl, sc5;

		sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
		sc5 |= sc4_bit;
		sl = ibp->sc_to_sl[sc5];

		process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
	}

	/*
	 * The opcode is in the low byte when its in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	opcode &= 0xff;

	pkey = (u16)be32_to_cpu(ohdr->bth[0]);

	if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
		/* FECN set on a unicast, non-CNP packet: answer with a CNP. */
		u16 slid = be16_to_cpu(hdr->lrh[3]);
		u8 sc5;

		sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
		sc5 |= sc4_bit;

		return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
	}

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	/* tlen is now the payload length (header, pad, and ICRC removed). */
	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
			u16 slid;
			u8 sc5;

			sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
			sc5 |= sc4_bit;

			slid = be16_to_cpu(hdr->lrh[3]);
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
					       pkey,
					       (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					       src_qp, qp->ibqp.qp_num,
					       be16_to_cpu(hdr->lrh[3]),
					       be16_to_cpu(hdr->lrh[1]));
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(qkey != qp->qkey)) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       src_qp, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			return;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;
		u16 slid = be16_to_cpu(hdr->lrh[3]);
		u8 sc5;

		sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
		sc5 |= sc4_bit;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* immediate data is carried in the payload length */
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		/* Keep the SGE so it can be reused for the next packet. */
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			      sizeof(struct ib_grh), 1, 0);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		      1, 0);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;

	/* Fill out and post the receive completion. */
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			/*
			 * Should be unreachable: a negative index should
			 * have been dropped above. Log (rate limited) and
			 * fall back to index 0.
			 */
			if (net_ratelimit()) {
				struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}

	wc.slid = be16_to_cpu(hdr->lrh[3]);
	sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	sc |= sc4_bit;
	wc.sl = ibp->sc_to_sl[sc];

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}