/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD / GPLv2 license . When using or
* redistributing this file , you may do so under either license .
*
* GPL LICENSE SUMMARY
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* General Public License for more details .
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions
* are met :
*
* - Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
* - Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in
* the documentation and / or other materials provided with the
* distribution .
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
* LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL ,
* SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
* LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
* DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
* ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*
*/
#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/*
 * gid_ok - check a GID against an expected interface id and subnet prefix
 *
 * Returns nonzero when @gid carries @id and its subnet prefix is either
 * @gid_prefix or the IB default prefix.
 */
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	if (gid->global.interface_id != id)
		return 0;

	return gid->global.subnet_prefix == gid_prefix ||
	       gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX;
}
/*
 * hfi1_ruc_check_hdr - validate an inbound packet's addressing against the QP
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 *
 * Checks the packet's SLID, GRH (when present), and PKEY against either the
 * alternate path (when the QP is armed for migration and the packet signals
 * migration) or the primary path. Returns 0 when the packet is acceptable,
 * 1 when it must be dropped.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	bool migrated = packet->migrated;
	u16 pkey = packet->pkey;

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		/* Validate against the alternate path. */
		if (!packet->grh) {
			/*
			 * A GRH is required on the alternate path unless
			 * this is a bypass (16B) packet.
			 */
			if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			/* DGID must name this port's GUID. */
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			/* SGID must match the alternate path's remote GID. */
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
			rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		/* Everything matched: complete the path migration. */
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		/* Validate against the primary path. */
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		/* A valid primary-path packet re-arms a REARMed QP. */
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}
/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: size of header after grh being sent in dwords
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	u32 sgid_idx = grh->sgid_index;

	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	/* An out-of-range index falls back to the port GUID. */
	if (sgid_idx >= HFI1_GUIDS_PER_PORT)
		sgid_idx = HFI1_PORT_GUID_INDEX;
	hdr->sgid.global.interface_id = get_sguid(ibp, sgid_idx);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
/* Dword offset of BTH[2] (the PSN word) within the SDMA header. */
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/* A pending clear request invalidates any cached AHG state. */
	if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			/* Remember the PSN the copied header was built with. */
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= HFI1_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			/* Edit the low 16 bits of the PSN in BTH[2]. */
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				/* High 16 bits changed too: second edit. */
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}
2017-08-04 23:54:41 +03:00
static inline void hfi1_make_ruc_bth ( struct rvt_qp * qp ,
struct ib_other_headers * ohdr ,
u32 bth0 , u32 bth1 , u32 bth2 )
{
ohdr - > bth [ 0 ] = cpu_to_be32 ( bth0 ) ;
ohdr - > bth [ 1 ] = cpu_to_be32 ( bth1 ) ;
ohdr - > bth [ 2 ] = cpu_to_be32 ( bth2 ) ;
}
/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non zero implies indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
					    struct ib_other_headers *ohdr,
					    u32 bth0, u32 bth1, u32 bth2,
					    int middle,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	/* 16B payloads are padded; account for pad and tail (LT) bytes. */
	u8 extra_bytes = hfi1_get_16b_padding(
				(ps->s_txreq->hdr_dwords << 2),
				ps->s_txreq->s_cur_size);
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				 extra_bytes + SIZE_OF_LT) >> 2);
	bool becn = false;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd =
			rdma_ah_retrieve_grh(&qp->remote_ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX)
			grd->sgid_index = 0;
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		l4 = OPA_16B_L4_IB_GLOBAL;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, grd,
				      ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
				      nwords);
		/* GRH present: AHG cannot be used for this packet. */
		middle = 0;
	}

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 |= OPA_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		becn = true;
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	/* An unassigned port LID forces the permissive LID. */
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			((1 << ppd->lmc) - 1));

	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid,
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  (ps->s_txreq->hdr_dwords + nwords) >> 1,
			  pkey, becn, 0, l4, priv->s_sc);
}
/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non zero implies indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
					   struct ib_other_headers *ohdr,
					   u32 bth0, u32 bth1, u32 bth2,
					   int middle,
					   struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u16 lrh0 = HFI1_LRH_BTH;
	/* Pad the payload up to a dword boundary. */
	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
					 extra_bytes) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;

		lrh0 = HFI1_LRH_GRH;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh,
				      rdma_ah_read_grh(&qp->remote_ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		/* GRH present: AHG cannot be used for this packet. */
		middle = 0;
	}
	/* Fold the SC and SL into LRH word 0. */
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0,
			 ps->s_txreq->hdr_dwords + nwords,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd_from_ibp(ibp)->lid |
				 rdma_ah_get_path_bits(&qp->remote_ah_attr));
}
typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
				  struct ib_other_headers *ohdr,
				  u32 bth0, u32 bth1, u32 bth2, int middle,
				  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};

/*
 * hfi1_make_ruc_header - build a 9B or 16B RUC header per the QP's type
 *
 * Resets the AHG bookkeeping in s_ahg to non-AHG defaults so that
 * build_verbs_tx_desc() never consumes a stale ahgidx; build_ahg()
 * re-populates these fields when the AHG feature is actually used.
 */
void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth1, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	hfi1_make_ruc_hdr make_hdr = hfi1_ruc_header_tbl[priv->hdr_type];

	/* Start from a clean, non-AHG state. */
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->tx_flags = 0;

	/* Make the appropriate header */
	make_hdr(qp, ohdr, bth0, bth1, bth2, middle, ps);
}
/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)			/* 5s in jiffies */

/**
 * schedule_send_yield - test for a yield required for QP send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly lookup values for
 *      the send engine progress
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs, if so an additional work entry is queued. At this
 * point, other QPs have an opportunity to be scheduled. It
 * returns true if a yield is required, otherwise, false
 * is returned.
 */
static bool schedule_send_yield(struct rvt_qp *qp,
				struct hfi1_pkt_state *ps)
{
	ps->pkts_sent = true;

	if (unlikely(time_after(jiffies, ps->timeout))) {
		if (!ps->in_thread ||
		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
			/* Requeue the QP and release the send engine. */
			spin_lock_irqsave(&qp->s_lock, ps->flags);
			qp->s_flags &= ~RVT_S_BUSY;
			hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
			this_cpu_inc(*ps->ppd->dd->send_schedule);
			trace_hfi1_rc_expired_time_slice(qp, true);
			return true;
		}
		/* In a thread with an idle queue: just yield the CPU. */
		cond_resched();
		this_cpu_inc(*ps->ppd->dd->send_schedule);
		ps->timeout = jiffies + ps->timeout_int;
	}
	trace_hfi1_rc_expired_time_slice(qp, false);
	return false;
}
/* rdmavt callback: run the send engine outside of a workqueue thread. */
void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
	hfi1_do_send(qp, false);
}
2016-01-20 01:43:33 +03:00
void _hfi1_do_send ( struct work_struct * work )
{
2018-09-28 17:17:09 +03:00
struct iowait_work * w = container_of ( work , struct iowait_work , iowork ) ;
struct rvt_qp * qp = iowait_to_qp ( w - > iow ) ;
2016-01-20 01:43:33 +03:00
2017-04-09 20:16:35 +03:00
hfi1_do_send ( qp , true ) ;
2016-01-20 01:43:33 +03:00
}
2015-07-30 22:17:43 +03:00
/**
* hfi1_do_send - perform a send on a QP
* @ work : contains a pointer to the QP
2017-04-09 20:16:35 +03:00
* @ in_thread : true if in a workqueue thread
2015-07-30 22:17:43 +03:00
*
* Process entries in the send work queue until credit or queue is
2016-09-25 17:42:08 +03:00
* exhausted . Only allow one CPU to send a packet per QP .
2015-07-30 22:17:43 +03:00
* Otherwise , two threads could send packets out of order .
*/
2017-04-09 20:16:35 +03:00
void hfi1_do_send ( struct rvt_qp * qp , bool in_thread )
2015-07-30 22:17:43 +03:00
{
2015-11-11 08:34:37 +03:00
struct hfi1_pkt_state ps ;
2016-02-04 01:34:23 +03:00
struct hfi1_qp_priv * priv = qp - > priv ;
2016-02-14 23:44:43 +03:00
int ( * make_req ) ( struct rvt_qp * qp , struct hfi1_pkt_state * ps ) ;
2015-07-30 22:17:43 +03:00
2015-11-11 08:34:37 +03:00
ps . dev = to_idev ( qp - > ibqp . device ) ;
ps . ibp = to_iport ( qp - > ibqp . device , qp - > port_num ) ;
ps . ppd = ppd_from_ibp ( ps . ibp ) ;
2017-05-04 15:14:10 +03:00
ps . in_thread = in_thread ;
2018-09-28 17:17:09 +03:00
ps . wait = iowait_get_ib_work ( & priv - > s_iowait ) ;
2017-05-04 15:14:10 +03:00
trace_hfi1_rc_do_send ( qp , in_thread ) ;
2015-11-11 08:34:37 +03:00
2016-02-04 01:34:23 +03:00
switch ( qp - > ibqp . qp_type ) {
case IB_QPT_RC :
2017-04-29 21:41:28 +03:00
if ( ! loopback & & ( ( rdma_ah_get_dlid ( & qp - > remote_ah_attr ) &
~ ( ( 1 < < ps . ppd - > lmc ) - 1 ) ) = =
ps . ppd - > lid ) ) {
2018-09-26 20:44:52 +03:00
rvt_ruc_loopback ( qp ) ;
2016-02-04 01:34:23 +03:00
return ;
}
2015-07-30 22:17:43 +03:00
make_req = hfi1_make_rc_req ;
2017-05-04 15:14:10 +03:00
ps . timeout_int = qp - > timeout_jiffies ;
2016-02-04 01:34:23 +03:00
break ;
case IB_QPT_UC :
2017-04-29 21:41:28 +03:00
if ( ! loopback & & ( ( rdma_ah_get_dlid ( & qp - > remote_ah_attr ) &
~ ( ( 1 < < ps . ppd - > lmc ) - 1 ) ) = =
ps . ppd - > lid ) ) {
2018-09-26 20:44:52 +03:00
rvt_ruc_loopback ( qp ) ;
2016-02-04 01:34:23 +03:00
return ;
}
2015-07-30 22:17:43 +03:00
make_req = hfi1_make_uc_req ;
2017-05-04 15:14:10 +03:00
ps . timeout_int = SEND_RESCHED_TIMEOUT ;
2016-02-04 01:34:23 +03:00
break ;
default :
2015-07-30 22:17:43 +03:00
make_req = hfi1_make_ud_req ;
2017-05-04 15:14:10 +03:00
ps . timeout_int = SEND_RESCHED_TIMEOUT ;
2016-02-04 01:34:23 +03:00
}
2015-07-30 22:17:43 +03:00
2016-04-12 20:46:10 +03:00
spin_lock_irqsave ( & qp - > s_lock , ps . flags ) ;
2015-07-30 22:17:43 +03:00
/* Return if we are already busy processing a work request. */
if ( ! hfi1_send_ok ( qp ) ) {
2018-09-28 17:17:09 +03:00
if ( qp - > s_flags & HFI1_S_ANY_WAIT_IO )
iowait_set_flag ( & priv - > s_iowait , IOWAIT_PENDING_IB ) ;
2016-04-12 20:46:10 +03:00
spin_unlock_irqrestore ( & qp - > s_lock , ps . flags ) ;
2015-07-30 22:17:43 +03:00
return ;
}
2016-01-20 01:43:01 +03:00
qp - > s_flags | = RVT_S_BUSY ;
2015-07-30 22:17:43 +03:00
2017-05-04 15:14:10 +03:00
ps . timeout_int = ps . timeout_int / 8 ;
ps . timeout = jiffies + ps . timeout_int ;
ps . cpu = priv - > s_sde ? priv - > s_sde - > cpu :
2016-02-04 01:34:23 +03:00
cpumask_first ( cpumask_of_node ( ps . ppd - > dd - > node ) ) ;
2017-07-24 17:45:37 +03:00
ps . pkts_sent = false ;
2017-05-04 15:14:10 +03:00
2016-02-14 23:45:18 +03:00
/* insure a pre-built packet is handled */
2018-09-28 17:17:09 +03:00
ps . s_txreq = get_waiting_verbs_txreq ( ps . wait ) ;
2015-07-30 22:17:43 +03:00
do {
/* Check for a constructed packet to be sent. */
2018-02-01 21:46:07 +03:00
if ( ps . s_txreq ) {
2016-04-12 20:46:10 +03:00
spin_unlock_irqrestore ( & qp - > s_lock , ps . flags ) ;
2015-07-30 22:17:43 +03:00
/*
* If the packet cannot be sent now , return and
2016-09-25 17:42:08 +03:00
* the send engine will be woken up later .
2015-07-30 22:17:43 +03:00
*/
2015-11-11 08:34:37 +03:00
if ( hfi1_verbs_send ( qp , & ps ) )
2016-02-14 23:10:04 +03:00
return ;
2018-09-28 17:17:09 +03:00
2016-02-14 23:10:04 +03:00
/* allow other tasks to run */
2017-05-04 15:14:10 +03:00
if ( schedule_send_yield ( qp , & ps ) )
return ;
2016-04-12 20:46:10 +03:00
spin_lock_irqsave ( & qp - > s_lock , ps . flags ) ;
2015-10-26 17:28:35 +03:00
}
2016-02-14 23:44:43 +03:00
} while ( make_req ( qp , & ps ) ) ;
2017-07-24 17:45:37 +03:00
iowait_starve_clear ( ps . pkts_sent , & priv - > s_iowait ) ;
2016-04-12 20:46:10 +03:00
spin_unlock_irqrestore ( & qp - > s_lock , ps . flags ) ;
2015-07-30 22:17:43 +03:00
}