RDMA/cma: Remove dead code for kernel rdmacm multicast
[ Upstream commit 1bb5091def706732c749df9aae45fbca003696f2 ]

There is no kernel user of RDMA CM multicast so this code managing the
multicast subscription of the kernel-only internal QP is dead. Remove
it.

This makes the bug fixes in the next patches much simpler.

Link: https://lore.kernel.org/r/20200902081122.745412-7-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 7c4fec2898
parent 557c184df3
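For context (not part of the patch): id->qp is only ever set by
rdma_create_qp(), which only kernel consumers call, so the sketch below
shows the sole call pattern that could have reached the attach/detach
branches removed here — and no in-tree code does this. This is a
hypothetical, hedged example: the helper name is invented, and it assumes
the four-argument rdma_join_multicast() kernel signature and the
FULLMEMBER_JOIN join state from <rdma/ib_sa.h> as they existed around
this patch.

#include <linux/bits.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>

/*
 * Hypothetical kernel consumer (no such in-tree caller exists, which is
 * the point of the patch): rdma_create_qp() is the only path that sets
 * id->qp, so only a caller like this could reach the removed code.
 */
static int sketch_kernel_mcast_join(struct rdma_cm_id *id, struct ib_pd *pd,
				    struct ib_qp_init_attr *attr,
				    struct sockaddr *mcast_addr, void *ctx)
{
	int ret;

	ret = rdma_create_qp(id, pd, attr);	/* sets id->qp (kernel QP) */
	if (ret)
		return ret;

	/* After this patch: WARNs and returns -EINVAL up front. */
	return rdma_join_multicast(id, mcast_addr, BIT(FULLMEMBER_JOIN), ctx);
}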
@@ -4182,16 +4182,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 		else
 			pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
 					     status);
-	mutex_lock(&id_priv->qp_mutex);
-	if (!status && id_priv->id.qp) {
-		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
-					 be16_to_cpu(multicast->rec.mlid));
-		if (status)
-			pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
-					     status);
-	}
-	mutex_unlock(&id_priv->qp_mutex);
-
 	event.status = status;
 	event.param.ud.private_data = mc->context;
 	if (!status) {
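With the attach branch gone, cma_ib_mc_handler() simply reports the join
status through the event and leaves QP attachment to the consumer. For a
user-space consumer that means attaching its own QP when the join event
arrives; a hedged librdmacm sketch (the handler name is invented, the
calls are standard libibverbs/librdmacm API):

#include <rdma/rdma_cma.h>
#include <infiniband/verbs.h>

/*
 * User-space sketch: attach the consumer-owned QP once the join
 * completes. The event's UD parameters carry the multicast GID/LID.
 */
static int on_mcast_join(struct rdma_cm_event *event, struct ibv_qp *qp)
{
	struct rdma_ud_param *ud = &event->param.ud;

	if (event->event != RDMA_CM_EVENT_MULTICAST_JOIN)
		return -1;

	/* Make the QP receive packets sent to the multicast group. */
	return ibv_attach_mcast(qp, &ud->ah_attr.grh.dgid, ud->ah_attr.dlid);
}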
@@ -4446,6 +4436,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	struct cma_multicast *mc;
 	int ret;
 
+	/* Not supported for kernel QPs */
+	if (WARN_ON(id->qp))
+		return -EINVAL;
+
 	if (!id->device)
 		return -EINVAL;
 
@@ -4500,11 +4494,6 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
 	list_del(&mc->list);
 	spin_unlock_irq(&id_priv->lock);
 
-	if (id->qp)
-		ib_detach_mcast(id->qp,
-				&mc->multicast.ib->rec.mgid,
-				be16_to_cpu(mc->multicast.ib->rec.mlid));
-
 	BUG_ON(id_priv->cma_dev->device != id->device);
 
 	if (rdma_cap_ib_mcast(id->device, id->port_num)) {
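This detach was the mirror of the attach removed from
cma_ib_mc_handler(); since no kernel QP is ever attached, there is
nothing to detach on leave. The symmetric cleanup likewise stays with the
consumer in user space; a hedged sketch (helper name invented, GID/LID
assumed to be whatever was attached on join):

#include <rdma/rdma_cma.h>
#include <infiniband/verbs.h>

/*
 * User-space sketch: detach the QP from the group before leaving, the
 * mirror image of the attach done on RDMA_CM_EVENT_MULTICAST_JOIN.
 */
static int sketch_mcast_leave(struct rdma_cm_id *id, struct ibv_qp *qp,
			      union ibv_gid *mgid, uint16_t mlid,
			      struct sockaddr *mcast_addr)
{
	int ret;

	ret = ibv_detach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	return rdma_leave_multicast(id, mcast_addr);
}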