RDMA/rxe: Move local ops to subroutine
Simplify rxe_requester() by moving the local operations out to a subroutine, and add an error return for an illegal send WR opcode.

Move next_index ahead of rxe_run_task; this fixes a small bug where work completions were delayed until after the next wqe, which was not the intended behavior.

Let errors return their own WC status; previously all errors were reported as protection errors, which was incorrect.

Route errors returned by rxe_do_local_ops() to the err: label, which causes an immediate completion; without this, an error on a last WR may get lost.

Rename fill_packet() to finish_packet(), which is more accurate.

Fixes: 8700e2e7c485 ("The software RoCE driver")
Link: https://lore.kernel.org/r/20210608042552.33275-7-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 886441fb2e
commit c1a411268a
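To make the completion-ordering point in the message concrete, here is a small stand-alone toy model. This is plain C, not rxe driver code: sq_next_index() and run_completer() are invented stand-ins for next_index(qp->sq.queue, ...) and rxe_run_task(&qp->comp.task, 1). It models a completer that only retires WQEs strictly behind the send-queue consumer index, so a completion posts immediately only when the index is advanced before the completion task runs, which is the order the new rxe_do_local_ops() uses.

/*
 * Toy model of the requester/completer ordering described in the
 * commit message.  Not rxe code: all names here are illustrative
 * stand-ins for the driver's real API.
 */
#include <stdio.h>

#define SQ_DEPTH 8

/* Advance the send-queue consumer index (stand-in for next_index()). */
static int sq_next_index(int i)
{
	return (i + 1) % SQ_DEPTH;
}

/*
 * The completer only retires WQEs strictly behind the consumer index,
 * so whether the finished WQE completes now depends on whether the
 * index was advanced first.
 */
static void run_completer(int consumer_index, int finished_wqe)
{
	if (finished_wqe < consumer_index)
		printf("wqe %d: completion reported immediately\n",
		       finished_wqe);
	else
		printf("wqe %d: completion delayed until the next wqe\n",
		       finished_wqe);
}

int main(void)
{
	int finished_wqe = 0;
	int idx;

	/* Old order: signal the completer, then advance the index.
	 * The finished WQE is not yet behind the index, so its
	 * completion is deferred -- the small bug the commit fixes. */
	idx = 0;
	run_completer(idx, finished_wqe);
	idx = sq_next_index(idx);

	/* New order (as in rxe_do_local_ops): advance the index first,
	 * then signal the completer, so the completion posts at once. */
	idx = 0;
	idx = sq_next_index(idx);
	run_completer(idx, finished_wqe);

	return 0;
}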
@@ -462,7 +462,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	return skb;
 }
 
-static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
 		       int paylen)
 {
@@ -578,6 +578,54 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 			jiffies + qp->qp_timeout_jiffies);
 }
 
+static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+	u8 opcode = wqe->wr.opcode;
+	struct rxe_dev *rxe;
+	struct rxe_mr *mr;
+	u32 rkey;
+
+	switch (opcode) {
+	case IB_WR_LOCAL_INV:
+		rxe = to_rdev(qp->ibqp.device);
+		rkey = wqe->wr.ex.invalidate_rkey;
+		mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+		if (!mr) {
+			pr_err("No MR for rkey %#x\n", rkey);
+			wqe->status = IB_WC_LOC_QP_OP_ERR;
+			return -EINVAL;
+		}
+		mr->state = RXE_MR_STATE_FREE;
+		rxe_drop_ref(mr);
+		break;
+	case IB_WR_REG_MR:
+		mr = to_rmr(wqe->wr.wr.reg.mr);
+
+		rxe_add_ref(mr);
+		mr->state = RXE_MR_STATE_VALID;
+		mr->access = wqe->wr.wr.reg.access;
+		mr->ibmr.lkey = wqe->wr.wr.reg.key;
+		mr->ibmr.rkey = wqe->wr.wr.reg.key;
+		mr->iova = wqe->wr.wr.reg.mr->iova;
+		rxe_drop_ref(mr);
+		break;
+	default:
+		pr_err("Unexpected send wqe opcode %d\n", opcode);
+		wqe->status = IB_WC_LOC_QP_OP_ERR;
+		return -EINVAL;
+	}
+
+	wqe->state = wqe_state_done;
+	wqe->status = IB_WC_SUCCESS;
+	qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+
+	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+		rxe_run_task(&qp->comp.task, 1);
+
+	return 0;
+}
+
 int rxe_requester(void *arg)
 {
 	struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -618,42 +666,11 @@ next_wqe:
 		goto exit;
 
 	if (wqe->mask & WR_LOCAL_OP_MASK) {
-		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
-			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-			struct rxe_mr *rmr;
-
-			rmr = rxe_pool_get_index(&rxe->mr_pool,
-						 wqe->wr.ex.invalidate_rkey >> 8);
-			if (!rmr) {
-				pr_err("No mr for key %#x\n",
-				       wqe->wr.ex.invalidate_rkey);
-				wqe->state = wqe_state_error;
-				wqe->status = IB_WC_MW_BIND_ERR;
-				goto exit;
-			}
-			rmr->state = RXE_MR_STATE_FREE;
-			rxe_drop_ref(rmr);
-			wqe->state = wqe_state_done;
-			wqe->status = IB_WC_SUCCESS;
-		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
-			struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
-
-			rmr->state = RXE_MR_STATE_VALID;
-			rmr->access = wqe->wr.wr.reg.access;
-			rmr->ibmr.lkey = wqe->wr.wr.reg.key;
-			rmr->ibmr.rkey = wqe->wr.wr.reg.key;
-			rmr->iova = wqe->wr.wr.reg.mr->iova;
-			wqe->state = wqe_state_done;
-			wqe->status = IB_WC_SUCCESS;
-		} else {
-			goto exit;
-		}
-		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
-			rxe_run_task(&qp->comp.task, 1);
-		qp->req.wqe_index = next_index(qp->sq.queue,
-						qp->req.wqe_index);
-		goto next_wqe;
+		ret = rxe_do_local_ops(qp, wqe);
+		if (unlikely(ret))
+			goto err;
+		else
+			goto next_wqe;
 	}
 
 	if (unlikely(qp_type(qp) == IB_QPT_RC &&
@@ -711,11 +728,17 @@ next_wqe:
 	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
 	if (unlikely(!skb)) {
 		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
+		wqe->status = IB_WC_LOC_QP_OP_ERR;
 		goto err;
 	}
 
-	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
-		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+	ret = finish_packet(qp, wqe, &pkt, skb, payload);
+	if (unlikely(ret)) {
+		pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
+		if (ret == -EFAULT)
+			wqe->status = IB_WC_LOC_PROT_ERR;
+		else
+			wqe->status = IB_WC_LOC_QP_OP_ERR;
 		kfree_skb(skb);
 		goto err;
 	}
@@ -740,6 +763,7 @@ next_wqe:
 		goto exit;
 	}
 
+	wqe->status = IB_WC_LOC_QP_OP_ERR;
 	goto err;
 }
 
@@ -748,7 +772,6 @@ next_wqe:
 	goto next_wqe;
 
 err:
-	wqe->status = IB_WC_LOC_PROT_ERR;
 	wqe->state = wqe_state_error;
 	__rxe_do_task(&qp->comp.task);
 