xprtrdma: Remove ->ro_reset
An RPC can exit at any time. When it does so, xprt_rdma_free() is called, and it calls ->ro_unmap(). If ->ro_reset() is running due to a transport disconnect, the two methods can race while processing the same rpcrdma_mw. The results are unpredictable.

Because of this, in previous patches I've altered ->ro_map() to handle MR reset. ->ro_reset() is no longer needed and can be removed.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Devesh Sharma <devesh.sharma@avagotech.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 06b00880b0
commit 3269a94b62
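For context, a quick illustration of the shape of the change. Each registration mode (fmr, frwr, physical) plugs its callbacks into struct rpcrdma_memreg_ops, so retiring the reset path means deleting one member, its three implementations, and the single call site in the connect worker. The sketch below is not from the patch: it is a minimal compilable rendering of the trimmed method table using only the members visible in the final hunk; the "_sketch" suffix and the forward declarations are stand-ins, and the real struct also carries ->ro_open(), ->ro_map() and ->ro_unmap().

/*
 * Sketch only, not from the patch: the registration method table once
 * ->ro_reset is gone, reduced to members shown in the final hunk below.
 */
#include <stddef.h>

struct rpcrdma_xprt;                    /* incomplete types suffice here */
struct rpcrdma_buffer;

struct rpcrdma_memreg_ops_sketch {
        size_t          (*ro_maxpages)(struct rpcrdma_xprt *);
        int             (*ro_init)(struct rpcrdma_xprt *);
        /* ->ro_reset() removed: per the description, ->ro_map() now
         * handles MR recovery, so fmr_op_reset(), frwr_op_reset() and
         * physical_op_reset() all go away. */
        void            (*ro_destroy)(struct rpcrdma_buffer *);
        const char      *ro_displayname;
};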
@@ -197,28 +197,6 @@ out_err:
 	return nsegs;
 }
 
-/* After a disconnect, unmap all FMRs.
- *
- * This is invoked only in the transport connect worker in order
- * to serialize with rpcrdma_register_fmr_external().
- */
-static void
-fmr_op_reset(struct rpcrdma_xprt *r_xprt)
-{
-	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-	struct rpcrdma_mw *r;
-	LIST_HEAD(list);
-	int rc;
-
-	list_for_each_entry(r, &buf->rb_all, mw_all)
-		list_add(&r->r.fmr->list, &list);
-
-	rc = ib_unmap_fmr(&list);
-	if (rc)
-		dprintk("RPC: %s: ib_unmap_fmr failed %i\n",
-			__func__, rc);
-}
-
 static void
 fmr_op_destroy(struct rpcrdma_buffer *buf)
 {
@@ -242,7 +220,6 @@ const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
 	.ro_open = fmr_op_open,
 	.ro_maxpages = fmr_op_maxpages,
 	.ro_init = fmr_op_init,
-	.ro_reset = fmr_op_reset,
 	.ro_destroy = fmr_op_destroy,
 	.ro_displayname = "fmr",
 };

@@ -430,44 +430,6 @@ out_err:
 	return nsegs;
 }
 
-/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
- * an unusable state. Find FRMRs in this state and dereg / reg
- * each. FRMRs that are VALID and attached to an rpcrdma_req are
- * also torn down.
- *
- * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
- *
- * This is invoked only in the transport connect worker in order
- * to serialize with rpcrdma_register_frmr_external().
- */
-static void
-frwr_op_reset(struct rpcrdma_xprt *r_xprt)
-{
-	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-	struct ib_device *device = r_xprt->rx_ia.ri_device;
-	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
-	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
-	struct rpcrdma_mw *r;
-	int rc;
-
-	list_for_each_entry(r, &buf->rb_all, mw_all) {
-		if (r->r.frmr.fr_state == FRMR_IS_INVALID)
-			continue;
-
-		__frwr_release(r);
-		rc = __frwr_init(r, pd, device, depth);
-		if (rc) {
-			dprintk("RPC: %s: mw %p left %s\n",
-				__func__, r,
-				(r->r.frmr.fr_state == FRMR_IS_STALE ?
-					"stale" : "valid"));
-			continue;
-		}
-
-		r->r.frmr.fr_state = FRMR_IS_INVALID;
-	}
-}
-
 static void
 frwr_op_destroy(struct rpcrdma_buffer *buf)
 {
@@ -490,7 +452,6 @@ const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
 	.ro_open = frwr_op_open,
 	.ro_maxpages = frwr_op_maxpages,
 	.ro_init = frwr_op_init,
-	.ro_reset = frwr_op_reset,
 	.ro_destroy = frwr_op_destroy,
 	.ro_displayname = "frwr",
 };

@@ -68,11 +68,6 @@ physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 	return 1;
 }
 
-static void
-physical_op_reset(struct rpcrdma_xprt *r_xprt)
-{
-}
-
 static void
 physical_op_destroy(struct rpcrdma_buffer *buf)
 {
@@ -84,7 +79,6 @@ const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
 	.ro_open = physical_op_open,
 	.ro_maxpages = physical_op_maxpages,
 	.ro_init = physical_op_init,
-	.ro_reset = physical_op_reset,
 	.ro_destroy = physical_op_destroy,
 	.ro_displayname = "physical",
 };

@@ -891,8 +891,6 @@ retry:
 		rpcrdma_flush_cqs(ep);
 
 		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
-		ia->ri_ops->ro_reset(xprt);
-
 		id = rpcrdma_create_id(xprt, ia,
 				(struct sockaddr *)&xprt->rx_data.addr);
 		if (IS_ERR(id)) {

@@ -352,7 +352,6 @@ struct rpcrdma_memreg_ops {
 			struct rpcrdma_create_data_internal *);
 	size_t (*ro_maxpages)(struct rpcrdma_xprt *);
 	int (*ro_init)(struct rpcrdma_xprt *);
-	void (*ro_reset)(struct rpcrdma_xprt *);
 	void (*ro_destroy)(struct rpcrdma_buffer *);
 	const char *ro_displayname;
 };