/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*/
#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x

/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @flags: unused
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int ret = 0;
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}
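	/* Pick the header layout: with or without a GRH. */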
	ohdr = &priv->s_hdr->u.oth;
	if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
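	/*
	 * Send state machine: messages larger than the path MTU are
	 * fragmented into FIRST/MIDDLE/LAST packets; anything that fits
	 * in a single packet goes out as ONLY.
	 */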
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] &
		      RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == READ_ONCE(qp->s_head))
			goto bail;
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		fallthrough;
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		fallthrough;
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
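	/*
	 * Common exit: len now holds the payload size for this packet
	 * and hwords the header size in 32-bit words.
	 */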
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_psn++ & QIB_PSN_MASK);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet.
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;
	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
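	/* The opcode is in the most significant byte of the first BTH word. */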
	opcode >>= 24;

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			rvt_put_ss(&qp->r_sge);
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}
	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}
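	/*
	 * A well-formed packet on a QP still in RTR is the first sign of
	 * life from the remote side; raise the communication-established
	 * event.
	 */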
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = rvt_get_rwqe(qp, false);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		fallthrough;
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
		break;
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
		break;
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		fallthrough;
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
		break;
	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			rvt_put_ss(&qp->s_rdma_read_sge);
		else {
			ret = rvt_get_rwqe(qp, true);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;
	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
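	/* The packet was accepted: advance the expected PSN and record state. */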
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;
}