/*
 * Copyright(c) 2015 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "trace_ibhdrs.h"
#include "qp.h"
/* We support only two types - 9B and 16B for now */
static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
};
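
/*
 * Usage note: hfi1_make_ud_req() below dispatches through this table as
 * hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe), so priv->hdr_type
 * must always be one of the HFI1_PKT_TYPE_* indices above.
 */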

/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp_priv *priv = sqp->priv;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    rvt_get_swqe_remote_qpn(swqe));
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = rvt_get_swqe_ah_attr(swqe);
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u32 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
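		/*
		 * Worked example of the LMC masking above: with
		 * ppd->lmc == 2 the port owns four LIDs, so
		 * ((1 << 2) - 1) == 0x3 keeps the low two path bits,
		 * and base LID 0x10 with path_bits 0x3 yields slid 0x13.
		 */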
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index,
						slid, false))) {
			hfi1_bad_pkey(ibp, pkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}
	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
			sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
		if (unlikely(qkey != qp->qkey))
			goto drop; /* silently drop per IBTA spec */
	}
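
	/*
	 * Example of the high-bit rule above: a WR qkey of 0x80010203 is
	 * negative when viewed as an int, so the sender's QP-context qkey
	 * (sqp->qkey) is used; a WR qkey of 0x00010203 is used as-is.
	 */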

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));

		/*
		 * For loopback packets with extended LIDs, the
		 * sgid_index in the GRH is 0 and the dgid is
		 * OPA GID of the sender. While creating a response
		 * to the loopback packet, IB core creates the new
		 * sgid_index from the DGID and that will be the
		 * OPA_GID_INDEX. The new dgid is from the sgid
		 * index and that will be in the IB GID format.
		 *
		 * We now have a case where the sent packet had a
		 * different sgid_index and dgid compared to the
		 * one that was received in response.
		 *
		 * Fix this inconsistency.
		 */
		if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
			if (grd.sgid_index == 0)
				grd.sgid_index = OPA_GID_INDEX;

			if (ib_is_opa_gid(&grd.dgid))
				grd.dgid.global.interface_id =
					cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
		}

		hfi1_make_grh(ibp, &grh, &grd, 0, 0);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
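
	/*
	 * The copy loop below walks a local scatter-gather cursor (ssge)
	 * seeded from the sender's sg_list, so the sender's own SGE state
	 * is left untouched while the data is replayed into the
	 * receiver's r_sge.
	 */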
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
		rvt_update_sge(&ssge, len, false);
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = rvt_get_swqe_pkey_index(swqe);
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			       ((1 << ppd->lmc) - 1))) & U16_MAX;
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr,
			       u16 *pkey, u32 extra_bytes, bool bypass)
{
	u32 bth0;
	struct hfi1_ibport *ibp;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}

	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		*pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
	else
		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	if (!bypass)
		bth0 |= *pkey;
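	/*
	 * At this point bth0 carries the opcode in bits 31:24, the
	 * solicited-event bit, and the pad count shifted to bit 20.
	 * The pkey lands in the low 16 bits only for 9B packets; 16B
	 * (bypass) packets carry the pkey in the 16B LRH instead, which
	 * is why hfi1_make_ud_req_16B() calls us with bypass == true.
	 */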
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] =
		cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
			    rvt_get_swqe_remote_qkey(wqe));
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
}

void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe)
{
	u32 nwords, extra_bytes;
	u16 len, slid, dlid, pkey;
	u16 lrh0 = 0;
	u8 sc5;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct ib_grh *grh;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);

	extra_bytes = -wqe->length & 3;
	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
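	/*
	 * Worked example of the padding math above: a 5-byte payload
	 * gives extra_bytes = -5 & 3 = 3, so the padded payload is
	 * 8 bytes = 2 dwords, and nwords = 2 + SIZE_OF_CRC dwords of
	 * data on the wire after the headers.
	 */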

	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	ps->s_txreq->hdr_dwords = 7;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}
	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
	len = ps->s_txreq->hdr_dwords + nwords;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0, len, dlid, slid);
}

void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid, slid, nwords, extra_bytes;
	u32 dest_qp = rvt_get_swqe_remote_qpn(wqe);
	u32 src_qp = qp->ibqp.qp_num;
	u16 len, pkey;
	u8 l4, sc5;
	bool is_mgmt = false;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);

	/*
	 * Build 16B Management Packet if either the destination
	 * or source queue pair number is 0 or 1.
	 */
	if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) {
		/* header size in dwords 16B LRH+L4_FM = (16+8)/4. */
		ps->s_txreq->hdr_dwords = 6;
		is_mgmt = true;
	} else {
		/* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
		ps->s_txreq->hdr_dwords = 9;
		if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			ps->s_txreq->hdr_dwords++;
	}

	/* SW provides space for CRC and LT for bypass packets. */
	extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
					   wqe->length);
	nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;

	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX) {
			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
				    grd->sgid_index);
			grd->sgid_index = 0;
		}
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		ps->s_txreq->hdr_dwords += hfi1_make_grh(
			ibp, grh, grd,
			ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
			nwords);
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	} else {
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		priv->s_sc = 0xf;
	else
		priv->s_sc = sc5;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			   ((1 << ppd->lmc) - 1));

	if (is_mgmt) {
		l4 = OPA_16B_L4_FM;
		pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
		hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
				 dest_qp, src_qp);
	} else {
		hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
	}

	/* Convert dwords to flits */
	len = (ps->s_txreq->hdr_dwords + nwords) >> 1;
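	/*
	 * A 16B flit is 8 bytes, i.e. two dwords, hence the shift by one
	 * above: e.g. 9 header dwords + 3 payload dwords = 6 flits.
	 */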

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	int next_cur;
	u32 lid;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
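		/*
		 * The mask above strips the low LMC bits, leaving the
		 * base LID: with lmc == 2, DLIDs 0x10 through 0x13 all
		 * reduce to base LID 0x10 for the local-port comparison
		 * below.
		 */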
		if (unlikely(!loopback &&
			     ((lid == ppd->lid) ||
			      ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
			       (qp->ibqp.qp_type == IB_QPT_GSI))))) {
			unsigned long tflags = ps->flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}

	qp->s_cur = next_cur;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	/* Make the appropriate header */
	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check. It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match...  */
		return -1;
	}
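
	/*
	 * Example of the membership masking below: a request for pkey
	 * 0x8001 (full member) matches a table entry of either 0x8001
	 * or 0x0001 (limited member) once bit 15 is cleared from both.
	 */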
	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}

void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 7;
	u16 len;
	u8 l4;
	struct hfi1_opa_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 nwords;

	hdr.hdr_type = HFI1_PKT_TYPE_16B;
	/* Populate length */
	nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
		   SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if (old_grh) {
		struct ib_grh *grh = &hdr.opah.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_16B_DWORDS + nwords) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	/* BIT 16 to 19 is TVER. Bit 20 to 22 is pad cnt */
	bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
	       (hfi1_get_16b_padding(hwords << 2, 0) << 20);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(remote_qpn);
	ohdr->bth[2] = 0; /* PSN 0 */

	/* Convert dwords to flits */
	len = (hwords + nwords) >> 1;
	hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);

	plen = 2 /* PBC */ + hwords + nwords;
	pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (!IS_ERR_OR_NULL(pbuf)) {
			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
		}
	}
}

void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u16 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct hfi1_opa_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	hdr.hdr_type = HFI1_PKT_TYPE_9B;
	if (old_grh) {
		struct ib_grh *grh = &hdr.ibh.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.ibh.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.ibh.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;
	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
	plen = 2 /* PBC */ + hwords;
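	/*
	 * plen is in dwords: the PBC itself is 8 bytes, i.e. two dwords,
	 * hence the leading 2 in the sum above.
	 */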
	pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (!IS_ERR_OR_NULL(pbuf)) {
			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
		}
	}
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *    - accept if the port is running an SM
	 *    - drop MAD if it's an SMA trap
	 *    - pkey == FULL_MGMT_P_KEY =>
	 *        reply with unsupported method
	 *    - pkey != FULL_MGMT_P_KEY =>
	 *        increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet structure
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	u32 hdrsize = packet->hlen;
	struct ib_wc wc;
	u32 src_qp;
	u16 pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = packet->sc;
	u8 sl_from_sc;
	u8 opcode = packet->opcode;
	u8 sl = packet->sl;
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u8 extra_bytes;
	u8 l4 = 0;
	bool dlid_is_permissive;
	bool slid_is_permissive;
	bool solicited = false;

	extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
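	/*
	 * SIZE_OF_CRC counts dwords (it is added to dword totals such as
	 * nwords elsewhere in this file), so SIZE_OF_CRC << 2 converts it
	 * to bytes when summing the trailing per-packet overhead here.
	 */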
	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		u32 permissive_lid =
			opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);

		l4 = hfi1_16B_get_l4(packet->hdr);
		pkey = hfi1_16B_get_pkey(packet->hdr);
		dlid_is_permissive = (dlid == permissive_lid);
		slid_is_permissive = (slid == permissive_lid);
	} else {
		pkey = ib_bth_get_pkey(packet->ohdr);
		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
	}
	sl_from_sc = ibp->sc_to_sl[sc5];

	if (likely(l4 != OPA_16B_L4_FM)) {
		src_qp = ib_get_sqpn(packet->ohdr);
		solicited = ib_bth_is_solicited(packet->ohdr);
	} else {
		src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
	}

	process_ecn(qp, packet);
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(dlid_is_permissive || slid_is_permissive))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pkey(ibp,
					      pkey, sl,
					      src_qp, qp->ibqp.qp_num,
					      slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(l4 != OPA_16B_L4_FM &&
			     ib_get_qkey(packet->ohdr) != qp->qkey))
			return; /* Silent drop */

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((dlid_is_permissive || slid_is_permissive) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}

	if (packet->grh) {
		rvt_copy_sge(qp, &qp->r_sge, packet->grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		struct ib_grh grh;
		/*
		 * Assuming we only created 16B on the send side
		 * if we want to use large LIDs, since GRH was stripped
		 * out when creating 16B, add back the GRH here.
		 */
		hfi1_make_ext_grh(packet, &grh, slid, dlid);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}

	rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		     true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}
	if (slid_is_permissive)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	wc.slid = slid & U16_MAX;
	wc.sl = sl_from_sc;

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, solicited);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}