RDMA/rxe: Don't schedule rxe_completer from rxe_requester

Now that rxe_completer() is always called serially after rxe_requester(),
there is no reason to schedule rxe_completer() from rxe_requester().

Link: https://lore.kernel.org/r/20240329145513.35381-9-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
Authored by Bob Pearson on 2024-03-29 09:55:09 -05:00; committed by Jason Gunthorpe
parent cd8aaddf0d
commit 4891f4fed0
2 changed files with 2 additions and 13 deletions

View File

@ -440,12 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
return err;
}
if ((qp_type(qp) != IB_QPT_RC) &&
(pkt->mask & RXE_END_MASK)) {
pkt->wqe->state = wqe_state_done;
rxe_sched_task(&qp->send_task);
}
rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
goto done;

View File

@ -545,6 +545,8 @@ static void update_wqe_state(struct rxe_qp *qp,
if (pkt->mask & RXE_END_MASK) {
if (qp_type(qp) == IB_QPT_RC)
wqe->state = wqe_state_pending;
else
wqe->state = wqe_state_done;
} else {
wqe->state = wqe_state_processing;
}
@ -631,12 +633,6 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->status = IB_WC_SUCCESS;
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
/* There is no ack coming for local work requests
* which can lead to a deadlock. So go ahead and complete
* it now.
*/
rxe_sched_task(&qp->send_task);
return 0;
}
@ -760,7 +756,6 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
rxe_sched_task(&qp->send_task);
goto done;
}
payload = mtu;