// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_ATOMIC_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_INVALIDATE_RKEY,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_ATOMIC_REPLY]			= "ATOMIC_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_INVALIDATE_RKEY]		= "ERR_INVALIDATE_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}
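
/* Fetch the next request packet from the input queue; if the QP has
 * moved to the error state, flush the queue and go on to drain the
 * receive WR queue instead.
 */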
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_put(qp);
			kfree_skb(skb);
			ib_device_put(qp->ibqp.device);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}
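
/* Compare the packet PSN against the expected responder PSN: for RC,
 * NAK out-of-sequence packets and replay duplicates; for UC, drop the
 * rest of a message once a PSN has been missed.
 */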
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}
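
/* Verify that this packet's opcode is a legal successor to the previously
 * received opcode, e.g. a MIDDLE or LAST packet must follow a FIRST or
 * MIDDLE packet of the same operation type.
 */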
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}
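
/* Check that the QP's access flags permit the remote operation (read,
 * write or atomic) requested by this packet.
 */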
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}
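
/* Consume the next receive WQE from the shared receive queue and copy it
 * into the QP, raising an SRQ limit event if the queue count drops below
 * the armed limit.
 */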
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;
	unsigned int count;
	size_t size;
	unsigned long flags;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_irqsave(&srq->rq.consumer_lock, flags);

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	if (!wqe) {
		spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
		return RESPST_ERR_RNR;
	}

	/* don't trust user space data */
	if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
		spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
		pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
		return RESPST_ERR_MALFORMED_WQE;
	}
	size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
	memcpy(&qp->resp.srq_wqe, wqe, size);

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

	if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}
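
/* Check that the resources needed to execute this request are available:
 * read and atomic requests need a non-zero responder resource queue and
 * operations that consume a receive WQE need one from the RQ or SRQ. If
 * the QP is in the error state, flush the receive queue instead.
 */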
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue,
					QUEUE_TYPE_FROM_CLIENT);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
		/* it is the requester's job to not send
		 * too many read/atomic ops, we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue,
				QUEUE_TYPE_FROM_CLIENT);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}
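
/* Look up and validate the rkey (or memory window rkey) carried by an
 * RDMA read, write or atomic request, check that the access range lies
 * within the MR, and save a referenced MR in qp->resp.mr for the
 * execute step.
 */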
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mr *mr = NULL;
	struct rxe_mw *mw = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.offset = 0;
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.offset = 0;
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	if (rkey_is_mw(rkey)) {
		mw = rxe_lookup_mw(qp, access, rkey);
		if (!mw) {
			pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		mr = mw->mr;
		if (!mr) {
			pr_err("%s: MW doesn't have an MR\n", __func__);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		if (mw->access & IB_ZERO_BASED)
			qp->resp.offset = mw->addr;

		rxe_put(mw);
		rxe_get(mr);
	} else {
		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
		if (!mr) {
			pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}
	}

	if (mr_check_range(mr, va + qp->resp.offset, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mr;
	return RESPST_EXECUTE;

err:
	if (mr)
		rxe_put(mr);
	if (mw)
		rxe_put(mw);

	return state;
}
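
/* Copy inbound send payload into the scatter/gather list of the current
 * receive WQE.
 */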
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, RXE_TO_MR_OBJ);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}
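
/* Copy the payload of an RDMA write packet into the target MR and
 * advance the responder's virtual address and residual length.
 */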
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
			  payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
					       struct rxe_pkt_info *pkt)
{
	struct resp_res *res;

	res = &qp->resp.resources[qp->resp.res_head];
	rxe_advance_resp_resource(qp);
	free_rd_atomic_resource(qp, res);

	res->type = RXE_ATOMIC_MASK;
	res->first_psn = pkt->psn;
	res->last_psn = pkt->psn;
	res->cur_psn = pkt->psn;
	res->replay = 0;

	return res;
}
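
/* Perform the atomic operation (compare & swap or fetch & add) on the
 * target memory under atomic_ops_lock, saving the original value in the
 * responder resource so the atomic ack can return it.
 */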
static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
					 struct rxe_pkt_info *pkt)
{
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mr *mr = qp->resp.mr;
	struct resp_res *res = qp->resp.res;
	u64 value;

	if (!res) {
		res = rxe_prepare_atomic_res(qp, pkt);
		qp->resp.res = res;
	}

	if (mr->state != RXE_MR_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));

	/* check that vaddr is 8-byte aligned */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);
	res->atomic.orig_val = value = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
		if (value == atmeth_comp(pkt))
			value = atmeth_swap_add(pkt);
	} else {
		value += atmeth_swap_add(pkt);
	}

	*vaddr = value;
	spin_unlock_bh(&atomic_ops_lock);

	qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	ret = RESPST_ACKNOWLEDGE;
out:
	return ret;
}
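
/* Build an ACK, atomic ACK or read response packet addressed back to the
 * requester, filling in the BTH and, when the opcode carries them, the
 * AETH and ATMACK headers.
 */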
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->paylen = paylen;
	ack->psn = psn;

	bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
		 qp->attr.dest_qp_num, 0, psn);

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.res->atomic.orig_val);

	err = rxe_prepare(&qp->pri_av, ack, skb);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
					     struct rxe_pkt_info *pkt)
{
	struct resp_res *res;
	u32 pkts;

	res = &qp->resp.resources[qp->resp.res_head];
	rxe_advance_resp_resource(qp);
	free_rd_atomic_resource(qp, res);

	res->type = RXE_READ_MASK;
	res->replay = 0;
	res->read.va = qp->resp.va + qp->resp.offset;
	res->read.va_org = qp->resp.va + qp->resp.offset;
	res->read.resid = qp->resp.resid;
	res->read.length = qp->resp.resid;
	res->read.rkey = qp->resp.rkey;

	pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
	res->first_psn = pkt->psn;
	res->cur_psn = pkt->psn;
	res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
	res->state = rdatm_res_state_new;

	return res;
}

/**
 * rxe_recheck_mr - revalidate MR from rkey and get a reference
 * @qp: the qp
 * @rkey: the rkey
 *
 * This code allows the MR to be invalidated or deregistered, or the MW,
 * if one was used, to be invalidated or deallocated. It is assumed that
 * the access permissions, if originally good, are still OK and that the
 * mappings are unchanged.
 *
 * TODO: If someone reregisters an MR to change its size or
 * access permissions during the processing of an RDMA read
 * we should kill the responder resource and complete the
 * operation with an error.
 *
 * Return: mr on success else NULL
 */
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mr *mr;
	struct rxe_mw *mw;

	if (rkey_is_mw(rkey)) {
		mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
		if (!mw)
			return NULL;

		mr = mw->mr;
		if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
		    !mr || mr->state != RXE_MR_STATE_VALID) {
			rxe_put(mw);
			return NULL;
		}

		rxe_get(mr);
		rxe_put(mw);

		return mr;
	}

	mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
	if (!mr)
		return NULL;

	if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
		rxe_put(mr);
		return NULL;
	}

	return mr;
}
/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	struct rxe_mr *mr;

	if (!res) {
		res = rxe_prepare_read_res(qp, req_pkt);
		qp->resp.res = res;
	}

	if (res->state == rdatm_res_state_new) {
		if (!res->replay) {
			mr = qp->resp.mr;
			qp->resp.mr = NULL;
		} else {
			mr = rxe_recheck_mr(qp, res->read.rkey);
			if (!mr)
				return RESPST_ERR_RKEY_VIOLATION;
		}

		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		mr = rxe_recheck_mr(qp, res->read.rkey);
		if (!mr)
			return RESPST_ERR_RKEY_VIOLATION;

		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
			  payload, RXE_FROM_MR_OBJ);
	if (err)
		pr_err("Failed copying memory\n");
	if (mr)
		rxe_put(mr);

	if (bth_pad(&ack_pkt)) {
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}
static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
	if (rkey_is_mw(rkey))
		return rxe_invalidate_mw(qp, rkey);
	else
		return rxe_invalidate_mr(qp, rkey);
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	union rdma_network_hdr hdr;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_GSI) {
			if (skb->protocol == htons(ETH_P_IP)) {
				memset(&hdr.reserved, 0,
						sizeof(hdr.reserved));
				memcpy(&hdr.roce4grh, ip_hdr(skb),
						sizeof(hdr.roce4grh));
				err = send_data_in(qp, &hdr, sizeof(hdr));
			} else {
				err = send_data_in(qp, ipv6_hdr(skb),
						sizeof(hdr));
			}
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		return RESPST_ATOMIC_REPLY;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	if (pkt->mask & RXE_IETH_MASK) {
		u32 rkey = ieth_rkey(pkt);

		err = invalidate_rkey(qp, rkey);
		if (err)
			return RESPST_ERR_INVALIDATE_RKEY;
	}

	if (pkt->mask & RXE_END_MASK)
		/* We successfully processed this new request. */
		qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK)
		return RESPST_COMPLETE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}
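
/* Post a work completion for the current receive WQE to the receive CQ,
 * filling in the immediate data, invalidate rkey and network header
 * information as appropriate, then pick the next responder state.
 */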
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (!wqe)
		goto finish;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status = qp->resp.status;
		uwc->qp_num = qp->ibqp.qp_num;
		uwc->wr_id = wqe->wr_id;
	} else {
		wc->status = qp->resp.status;
		wc->qp = &qp->ibqp;
		wc->wr_id = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length :
					wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num = qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

finish:
	if (unlikely(qp->resp.state == QP_STATE_ERROR))
		return RESPST_CHK_RESOURCE;
	if (unlikely(!pkt))
		return RESPST_DONE;
	if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}
static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct resp_res *res = qp->resp.res;

	skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
				 0, psn, syndrome);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}

	/* hold an extra reference and save the skb in the responder
	 * resource so a duplicate atomic request can be answered by
	 * resending it
	 */
	skb_get(skb);

	res->atomic.skb = skb;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_put(qp);
	}

	/* have to clear this since it is used to trigger
	 * long read replies
	 */
	qp->resp.res = NULL;
out:
	return err;
}
static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
	else if (bth_ack(pkt))
		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_put(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}
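
/* Handle a duplicate request: re-acknowledge sends and writes, replay a
 * read reply from the matching responder resource, and resend a saved
 * atomic ack. Requests with no matching resource are dropped (class D).
 */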
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
		return RESPST_CLEANUP;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			skb_get(res->atomic.skb);
			/* Resend the result. */
			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}
/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_put(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_queue *q = qp->rq.queue;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_put(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (notify)
		return;

	while (!qp->srq && q && queue_head(q, q->type))
		queue_advance_consumer(q, q->type);
}
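
/* Entry point for the responder task: processes inbound request packets
 * for one QP by stepping through the RESPST_* state machine until the
 * request queue is drained or an exit state is reached.
 */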
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	if (!rxe_get(qp))
		return -EAGAIN;

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ATOMIC_REPLY:
			state = rxe_atomic_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_INVALIDATE_RKEY:
			/* RC - Class J. */
			qp->resp.goto_error = 1;
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_put(qp);
	return ret;
}