xprtrdma: Re-arm after missed events
ib_req_notify_cq(IB_CQ_REPORT_MISSED_EVENTS) returns a positive
value if WCs were added to a CQ after the last completion upcall
but before the CQ has been re-armed.

Commit 7f23f6f6e3 ("xprtrmda: Reduce lock contention in completion
handlers") assumed that when ib_req_notify_cq() returned a positive
RC, the CQ had also been successfully re-armed, making it safe to
return control to the provider without losing any completion
signals. That is an invalid assumption.

Change both completion handlers to continue polling while
ib_req_notify_cq() returns a positive value.

Fixes: 7f23f6f6e3 ("xprtrmda: Reduce lock contention in ...")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Devesh Sharma <devesh.sharma@avagotech.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
This commit is contained in:
parent a045178887
commit 7b3d770c67
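For readers unfamiliar with the verbs API, the shape of the fix is the
drain/re-arm loop sketched below. This is a minimal sketch, not the patch
itself: my_cq_upcall() and my_cq_poll() are hypothetical names introduced
for illustration, while ib_poll_cq() and ib_req_notify_cq() are the real
kernel verbs calls.

	#include <rdma/ib_verbs.h>

	/* Hypothetical helper: drain every work completion currently
	 * queued on the CQ.
	 */
	static void my_cq_poll(struct ib_cq *cq)
	{
		struct ib_wc wc;

		while (ib_poll_cq(cq, 1, &wc) > 0)
			; /* handle the completion described by wc here */
	}

	/* Completion upcall using the pattern this patch adopts: keep
	 * polling until re-arming reports that no events were missed.
	 * A positive return from ib_req_notify_cq() with
	 * IB_CQ_REPORT_MISSED_EVENTS only says WCs may have arrived
	 * since the last poll; it does not guarantee the CQ was
	 * re-armed, so returning at that point could strand completions.
	 */
	static void my_cq_upcall(struct ib_cq *cq, void *cq_context)
	{
		do {
			my_cq_poll(cq);
		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					  IB_CQ_REPORT_MISSED_EVENTS) > 0);
	}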
@@ -179,38 +179,17 @@ rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
 	return 0;
 }
 
-/*
- * Handle send, fast_reg_mr, and local_inv completions.
- *
- * Send events are typically suppressed and thus do not result
- * in an upcall. Occasionally one is signaled, however. This
- * prevents the provider's completion queue from wrapping and
- * losing a completion.
+/* Handle provider send completion upcalls.
  */
 static void
 rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
 {
 	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
-	int rc;
-
-	rc = rpcrdma_sendcq_poll(cq, ep);
-	if (rc) {
-		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
 
-	rc = ib_req_notify_cq(cq,
-			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
-	if (rc == 0)
-		return;
-	if (rc < 0) {
-		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
-
-	rpcrdma_sendcq_poll(cq, ep);
+	do {
+		rpcrdma_sendcq_poll(cq, ep);
+	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
+				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 }
 
 static void
@@ -274,42 +253,17 @@ out_schedule:
 	return rc;
 }
 
-/*
- * Handle receive completions.
- *
- * It is reentrant but processes single events in order to maintain
- * ordering of receives to keep server credits.
- *
- * It is the responsibility of the scheduled tasklet to return
- * recv buffers to the pool. NOTE: this affects synchronization of
- * connection shutdown. That is, the structures required for
- * the completion of the reply handler must remain intact until
- * all memory has been reclaimed.
+/* Handle provider receive completion upcalls.
  */
 static void
 rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
 {
 	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
-	int rc;
-
-	rc = rpcrdma_recvcq_poll(cq, ep);
-	if (rc) {
-		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
 
-	rc = ib_req_notify_cq(cq,
-			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
-	if (rc == 0)
-		return;
-	if (rc < 0) {
-		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
-
-	rpcrdma_recvcq_poll(cq, ep);
+	do {
+		rpcrdma_recvcq_poll(cq, ep);
+	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
+				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 }
 
 static void