RDMA/rxe: Split rxe_run_task() into two subroutines

Split rxe_run_task(task, sched) into rxe_run_task(task) and
rxe_sched_task(task). The former runs the task immediately in the
calling context via rxe_do_task(); the latter defers it to tasklet
context via tasklet_schedule(). Call sites that computed the sched
flag at runtime now branch explicitly between the two.

Link: https://lore.kernel.org/r/20221021200118.2163-5-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
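
As a quick reference, the conversion rule applied at every call site in
the diff below can be sketched like this (illustrative fragment only, not
part of the patch; task and must_sched stand for the fields used in each
hunk):

	/* was: rxe_run_task(task, 0) -- run the task inline */
	rxe_run_task(task);

	/* was: rxe_run_task(task, 1) -- defer the task to a tasklet */
	rxe_sched_task(task);

	/* was: rxe_run_task(task, must_sched) -- a flag computed at
	 * runtime becomes an explicit branch at the call site
	 */
	if (must_sched)
		rxe_sched_task(task);
	else
		rxe_run_task(task);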

drivers/infiniband/sw/rxe/rxe_comp.c

@@ -118,7 +118,7 @@ void retransmit_timer(struct timer_list *t)
 
 	if (qp->valid) {
 		qp->comp.timeout = 1;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 }
@@ -132,7 +132,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 
 	if (must_sched != 0)
 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
-	rxe_run_task(&qp->comp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->comp.task);
+	else
+		rxe_run_task(&qp->comp.task);
 }
 
 static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -313,7 +316,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 			qp->comp.psn = pkt->psn;
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_run_task(&qp->req.task, 0);
+				rxe_run_task(&qp->req.task);
 			}
 		}
 		return COMPST_ERROR_RETRY;
@@ -460,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 */
 	if (qp->req.wait_fence) {
 		qp->req.wait_fence = 0;
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 	}
 }
@@ -474,7 +477,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 		if (qp->req.need_rd_atomic) {
 			qp->comp.timeout_retry = 0;
 			qp->req.need_rd_atomic = 0;
-			rxe_run_task(&qp->req.task, 0);
+			rxe_run_task(&qp->req.task);
 		}
 	}
@@ -520,7 +523,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 
 		if (qp->req.wait_psn) {
 			qp->req.wait_psn = 0;
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
@@ -654,7 +657,7 @@ int rxe_completer(void *arg)
 
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_run_task(&qp->req.task, 1);
+				rxe_sched_task(&qp->req.task);
 			}
 
 			state = COMPST_DONE;
@@ -722,7 +725,7 @@ int rxe_completer(void *arg)
 					       RXE_CNT_COMP_RETRY);
 				qp->req.need_retry = 1;
 				qp->comp.started_retry = 1;
-				rxe_run_task(&qp->req.task, 0);
+				rxe_run_task(&qp->req.task);
 			}
 
 			goto done;

drivers/infiniband/sw/rxe/rxe_net.c

@@ -345,7 +345,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
 
 	if (unlikely(qp->need_req_skb &&
 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
-		rxe_run_task(&qp->req.task, 1);
+		rxe_sched_task(&qp->req.task);
 
 	rxe_put(qp);
 }
@@ -429,7 +429,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	if ((qp_type(qp) != IB_QPT_RC) &&
 	    (pkt->mask & RXE_END_MASK)) {
 		pkt->wqe->state = wqe_state_done;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 
 	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);

drivers/infiniband/sw/rxe/rxe_qp.c

@@ -536,10 +536,10 @@ static void rxe_qp_drain(struct rxe_qp *qp)
 		if (qp->req.state != QP_STATE_DRAINED) {
 			qp->req.state = QP_STATE_DRAIN;
 			if (qp_type(qp) == IB_QPT_RC)
-				rxe_run_task(&qp->comp.task, 1);
+				rxe_sched_task(&qp->comp.task);
 			else
 				__rxe_do_task(&qp->comp.task);
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
 }
@@ -553,13 +553,13 @@ void rxe_qp_error(struct rxe_qp *qp)
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
-	rxe_run_task(&qp->resp.task, 1);
+	rxe_sched_task(&qp->resp.task);
 
 	if (qp_type(qp) == IB_QPT_RC)
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	else
 		__rxe_do_task(&qp->comp.task);
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 /* called by the modify qp verb */
/* called by the modify qp verb */

drivers/infiniband/sw/rxe/rxe_req.c

@@ -105,7 +105,7 @@ void rnr_nak_timer(struct timer_list *t)
 	/* request a send queue retry */
 	qp->req.need_retry = 1;
 	qp->req.wait_for_rnr_timer = 0;
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
@@ -608,7 +608,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 * which can lead to a deadlock. So go ahead and complete
 	 * it now.
 	 */
-	rxe_run_task(&qp->comp.task, 1);
+	rxe_sched_task(&qp->comp.task);
 
 	return 0;
 }
@@ -733,7 +733,7 @@ int rxe_requester(void *arg)
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			rxe_run_task(&qp->comp.task, 0);
+			rxe_run_task(&qp->comp.task);
 			goto done;
 		}
 		payload = mtu;
@@ -795,7 +795,7 @@ int rxe_requester(void *arg)
 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (err == -EAGAIN) {
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 			goto exit;
 		}
 
@@ -817,7 +817,7 @@ err:
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 	wqe->state = wqe_state_error;
 	qp->req.state = QP_STATE_ERROR;
-	rxe_run_task(&qp->comp.task, 0);
+	rxe_run_task(&qp->comp.task);
 exit:
 	ret = -EAGAIN;
 out:

drivers/infiniband/sw/rxe/rxe_resp.c

@@ -91,7 +91,10 @@ void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
 		     (skb_queue_len(&qp->req_pkts) > 1);
-	rxe_run_task(&qp->resp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->resp.task);
+	else
+		rxe_run_task(&qp->resp.task);
 }
 
 static inline enum resp_states get_req(struct rxe_qp *qp,

drivers/infiniband/sw/rxe/rxe_task.c

@@ -123,15 +123,20 @@ void rxe_cleanup_task(struct rxe_task *task)
 	tasklet_kill(&task->tasklet);
 }
 
-void rxe_run_task(struct rxe_task *task, int sched)
+void rxe_run_task(struct rxe_task *task)
 {
 	if (task->destroyed)
 		return;
 
-	if (sched)
-		tasklet_schedule(&task->tasklet);
-	else
-		rxe_do_task(&task->tasklet);
+	rxe_do_task(&task->tasklet);
+}
+
+void rxe_sched_task(struct rxe_task *task)
+{
+	if (task->destroyed)
+		return;
+
+	tasklet_schedule(&task->tasklet);
 }
 
 void rxe_disable_task(struct rxe_task *task)
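
The behavioral difference between the two new helpers is just inline
versus deferred execution. A standalone userspace sketch (illustrative
only; plain C with no kernel APIs, where pending stands in for the
tasklet's scheduled state) models the split:

	#include <stdbool.h>
	#include <stdio.h>

	struct task {
		bool destroyed;
		bool pending;			/* stands in for TASKLET_STATE_SCHED */
		void (*func)(const char *why);
	};

	static void run_task(struct task *t)	/* models rxe_run_task() */
	{
		if (t->destroyed)
			return;
		t->func("inline");		/* executes now, like rxe_do_task() */
	}

	static void sched_task(struct task *t)	/* models rxe_sched_task() */
	{
		if (t->destroyed)
			return;
		t->pending = true;		/* runs later, like tasklet_schedule() */
	}

	static void work(const char *why)
	{
		printf("task ran (%s)\n", why);
	}

	int main(void)
	{
		struct task t = { .func = work };

		run_task(&t);			/* prints immediately */
		sched_task(&t);			/* only marks the task pending */
		if (t.pending)			/* deferred pass, like the softirq */
			t.func("deferred");
		return 0;
	}

Note that both helpers keep the task->destroyed check, so the guard
against running a torn-down task is preserved on both paths.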
void rxe_disable_task(struct rxe_task *task)

drivers/infiniband/sw/rxe/rxe_task.h

@@ -52,10 +52,9 @@ int __rxe_do_task(struct rxe_task *task);
  */
 void rxe_do_task(struct tasklet_struct *t);
 
-/* run a task, else schedule it to run as a tasklet, The decision
- * to run or schedule tasklet is based on the parameter sched.
- */
-void rxe_run_task(struct rxe_task *task, int sched);
+void rxe_run_task(struct rxe_task *task);
+
+void rxe_sched_task(struct rxe_task *task);
 
 /* keep a task from scheduling */
 void rxe_disable_task(struct rxe_task *task);

drivers/infiniband/sw/rxe/rxe_verbs.c

@@ -695,9 +695,9 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
 		wr = next;
 	}
 
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 	if (unlikely(qp->req.state == QP_STATE_ERROR))
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 
 	return err;
 }
@@ -719,7 +719,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 		return 0;
 	} else
 		return rxe_post_send_kernel(qp, wr, bad_wr);
@@ -759,7 +759,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
 	if (qp->resp.state == QP_STATE_ERROR)
-		rxe_run_task(&qp->resp.task, 1);
+		rxe_sched_task(&qp->resp.task);
 
 err1:
 	return err;
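
A note on the rxe_post_send() hunk above: the user-QP path deliberately
keeps the inline rxe_run_task() call so that, as its comment says,
protocol processing runs in the posting process's context, while
rxe_post_send_kernel() and the error-drain paths defer to tasklet
context with rxe_sched_task().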