/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	int migrated;
	u32 bth0, bth1;

	bth0 = be32_to_cpu(packet->ohdr->bth[0]);
	bth1 = be32_to_cpu(packet->ohdr->bth[1]);
	migrated = bth0 & IB_BTH_MIG_REQ;

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		if (!packet->grh) {
			if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			    IB_AH_GRH)
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, (u16)bth0, sl,
				      0, qp->ibqp.qp_num, slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
			rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!packet->grh) {
			if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			    IB_AH_GRH)
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, (u16)bth0, sl,
				      0, qp->ibqp.qp_num, slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}

/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to
 * forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
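		/*
		 * The value found at the target address before the operation
		 * is returned into the requester's first SGE: fetch-and-add
		 * takes the post-add value from atomic64_add_return() and
		 * subtracts the addend, while compare-and-swap uses the old
		 * value returned by cmpxchg().
		 */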
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}
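
	/*
	 * Copy the payload: data is pulled from sqp->s_sge a chunk at a
	 * time and handed to hfi1_copy_sge(), which scatters it into
	 * qp->r_sge as set up for the opcode above.
	 */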
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
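	/*
	 * An RNR retry count of 7 requests unlimited retries (per IB
	 * semantics), so only consume a retry when the count is finite.
	 */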
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
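
/*
 * Offset, in 32-bit words, of BTH2 (which carries the PSN) within the
 * pre-built SDMA header; the AHG descriptors built in build_ahg() below
 * use it to locate the PSN field they edit.
 */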
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
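		/*
		 * The descriptors below patch the PSN in BTH2 of the copied
		 * header: the first always updates the low 16 bits, and a
		 * second is added only when the upper 16 bits have changed
		 * since the PSN that was copied.
		 */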
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
					(qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth1;

	/* Construct the header. */
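	/*
	 * Pad the payload to a multiple of 4 bytes: extra_bytes is the pad
	 * count and nwords is the padded payload length in 32-bit words.
	 */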
	extra_bytes = -ps->s_txreq->s_cur_size & 3;
	nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
	lrh0 = HFI1_LRH_BTH;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		qp->s_hdrwords +=
			hfi1_make_grh(ibp,
				      &ps->s_txreq->phdr.hdr.ibh.u.l.grh,
				      &qp->remote_ah_attr.grh,
				      qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;
	/*
	 * Reset s_ahg/AHG fields.
	 *
	 * This ensures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * a stale ahgidx.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;
	ps->s_txreq->phdr.hdr.ibh.lrh[0] = cpu_to_be16(lrh0);
	ps->s_txreq->phdr.hdr.ibh.lrh[1] =
		cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	ps->s_txreq->phdr.hdr.ibh.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	ps->s_txreq->phdr.hdr.ibh.lrh[3] =
		cpu_to_be16(ppd_from_ibp(ibp)->lid |
			    rdma_ah_get_path_bits(&qp->remote_ah_attr));
	bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	bth1 = qp->remote_qpn;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
	}
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

/**
 * schedule_send_yield - test for a yield required for QP send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked-up values for
 *      the send engine progress
 *
 * This routine checks if the time slice for the QP has expired for
 * RC QPs; if so, an additional work entry is queued. At this point,
 * other QPs have an opportunity to be scheduled. It returns true if
 * a yield is required, otherwise false is returned.
 */
static bool schedule_send_yield(struct rvt_qp *qp,
				struct hfi1_pkt_state *ps)
{
	ps->pkts_sent = true;

	if (unlikely(time_after(jiffies, ps->timeout))) {
		if (!ps->in_thread ||
		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
			spin_lock_irqsave(&qp->s_lock, ps->flags);
			qp->s_flags &= ~RVT_S_BUSY;
			hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
			this_cpu_inc(*ps->ppd->dd->send_schedule);
			trace_hfi1_rc_expired_time_slice(qp, true);
			return true;
		}
		cond_resched();
		this_cpu_inc(*ps->ppd->dd->send_schedule);
		ps->timeout = jiffies + ps->timeout_int;
	}

	trace_hfi1_rc_expired_time_slice(qp, false);
	return false;
}

void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
	hfi1_do_send(qp, false);
}

void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp, true);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	ps.in_thread = in_thread;

	trace_hfi1_rc_do_send(qp, in_thread);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		ps.timeout_int = qp->timeout_jiffies;
		break;
	case IB_QPT_UC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;
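
	/*
	 * The yield timeslice handed to schedule_send_yield() is 1/8 of the
	 * nominal interval: the QP timeout for RC, or SEND_RESCHED_TIMEOUT
	 * for UC/UD (selected in the switch above).
	 */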
	ps.timeout_int = ps.timeout_int / 8;
	ps.timeout = jiffies + ps.timeout_int;
	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	ps.pkts_sent = false;

	/* ensure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(qp);
	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send engine will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* Record that s_ahg is empty. */
			qp->s_hdrwords = 0;
			/* allow other tasks to run */
			if (schedule_send_yield(qp, &ps))
				return;
			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));
	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	if (++last >= qp->s_size)
		last = 0;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     ib_hfi1_wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}