v6.3 rc RDMA pull request
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "We had a fairly slow cycle on the rc side this time, here are the
  accumulated fixes, mostly in drivers:

   - irdma should not generate extra completions during flushing

   - Fix several memory leaks

   - Do not get confused in irdma's iwarp mode if IPv6 is present

   - Correct a link speed calculation in mlx5

   - Increase the EQ/WQ limits on erdma as they are too small for big
     applications

   - Use the right math for erdma's inline mtt feature

   - Make erdma probing more robust to boot time ordering differences

   - Fix a KMSAN crash in CMA due to uninitialized qkey"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Fix GID entry ref leak when create_ah fails
  RDMA/cma: Allow UD qp_type to join multicast only
  RDMA/erdma: Defer probing if netdevice can not be found
  RDMA/erdma: Inline mtt entries into WQE if supported
  RDMA/erdma: Update default EQ depth to 4096 and max_send_wr to 8192
  RDMA/erdma: Fix some typos
  IB/mlx5: Add support for 400G_8X lane speed
  RDMA/irdma: Add ipv4 check to irdma_find_listener()
  RDMA/irdma: Increase iWARP CM default rexmit count
  RDMA/irdma: Fix memory leak of PBLE objects
  RDMA/irdma: Do not generate SW completions for NOPs
commit aee3c14e86
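For background on the CMA change below ("RDMA/cma: Allow UD qp_type to join multicast only"): rdma_join_multicast() now rejects any id whose QP type is not IB_QPT_UD at call time. A minimal userspace sketch of a valid UD join via librdmacm follows; the addresses are placeholders and error handling is abbreviated, so treat it as illustrative rather than a complete program.

	/* Sketch: a UD multicast join with librdmacm. After this pull, a join
	 * on a non-UD id fails with EINVAL. Addresses are placeholders. */
	#include <stdio.h>
	#include <netdb.h>
	#include <rdma/rdma_cma.h>

	int main(void)
	{
		struct rdma_event_channel *ch = rdma_create_event_channel();
		struct rdma_cm_id *id;
		struct addrinfo *src, *grp;

		/* RDMA_PS_UDP gives the id a UD QP type, which the join requires. */
		if (!ch || rdma_create_id(ch, &id, NULL, RDMA_PS_UDP))
			return 1;
		if (getaddrinfo("192.168.1.10", NULL, NULL, &src) ||  /* local addr */
		    rdma_bind_addr(id, src->ai_addr))
			return 1;
		if (getaddrinfo("239.0.0.1", NULL, NULL, &grp) ||     /* mcast group */
		    rdma_join_multicast(id, grp->ai_addr, NULL)) {
			perror("rdma_join_multicast");
			return 1;
		}
		/* ... block on ch for RDMA_CM_EVENT_MULTICAST_JOIN ... */
		return 0;
	}

Compile with -lrdmacm; the join is asynchronous, so success of the call only means the request was queued and the completion status arrives with the RDMA_CM_EVENT_MULTICAST_JOIN event.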
drivers/infiniband/core/cma.c:

@@ -624,22 +624,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
 	return id_priv->id.route.addr.src_addr.ss_family;
 }
 
-static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+static int cma_set_default_qkey(struct rdma_id_private *id_priv)
 {
 	struct ib_sa_mcmember_rec rec;
 	int ret = 0;
 
-	if (id_priv->qkey) {
-		if (qkey && id_priv->qkey != qkey)
-			return -EINVAL;
-		return 0;
-	}
-
-	if (qkey) {
-		id_priv->qkey = qkey;
-		return 0;
-	}
-
 	switch (id_priv->id.ps) {
 	case RDMA_PS_UDP:
 	case RDMA_PS_IB:
@@ -659,6 +648,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
 	return ret;
 }
 
+static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+{
+	if (!qkey ||
+	    (id_priv->qkey && (id_priv->qkey != qkey)))
+		return -EINVAL;
+
+	id_priv->qkey = qkey;
+	return 0;
+}
+
 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
 {
 	dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -1229,7 +1228,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
 	if (id_priv->id.qp_type == IB_QPT_UD) {
-		ret = cma_set_qkey(id_priv, 0);
+		ret = cma_set_default_qkey(id_priv);
 		if (ret)
 			return ret;
 
@@ -4569,7 +4568,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
 	memset(&rep, 0, sizeof rep);
 	rep.status = status;
 	if (status == IB_SIDR_SUCCESS) {
-		ret = cma_set_qkey(id_priv, qkey);
+		if (qkey)
+			ret = cma_set_qkey(id_priv, qkey);
+		else
+			ret = cma_set_default_qkey(id_priv);
 		if (ret)
 			return ret;
 		rep.qp_num = id_priv->qp_num;
@@ -4774,9 +4776,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
 	enum ib_gid_type gid_type;
 	struct net_device *ndev;
 
-	if (!status)
-		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
-	else
+	if (status)
 		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
 				     status);
 
@@ -4804,7 +4804,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
 	}
 
 	event->param.ud.qp_num = 0xFFFFFF;
-	event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
+	event->param.ud.qkey = id_priv->qkey;
 
 out:
 	if (ndev)
@@ -4823,8 +4823,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
 		goto out;
 
-	cma_make_mc_event(status, id_priv, multicast, &event, mc);
-	ret = cma_cm_event_handler(id_priv, &event);
+	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+	if (!ret) {
+		cma_make_mc_event(status, id_priv, multicast, &event, mc);
+		ret = cma_cm_event_handler(id_priv, &event);
+	}
 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
 	WARN_ON(ret);
 
@@ -4877,9 +4880,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	if (ret)
 		return ret;
 
-	ret = cma_set_qkey(id_priv, 0);
-	if (ret)
-		return ret;
+	if (!id_priv->qkey) {
+		ret = cma_set_default_qkey(id_priv);
+		if (ret)
+			return ret;
+	}
 
 	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
 	rec.qkey = cpu_to_be32(id_priv->qkey);
@@ -4956,9 +4961,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
 
 	ib.rec.pkey = cpu_to_be16(0xffff);
-	if (id_priv->id.ps == RDMA_PS_UDP)
-		ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
-
 	if (dev_addr->bound_dev_if)
 		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
 	if (!ndev)
@@ -4984,6 +4986,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 	if (err || !ib.rec.mtu)
 		return err ?: -EINVAL;
 
+	if (!id_priv->qkey)
+		cma_set_default_qkey(id_priv);
+
 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
 		    &ib.rec.port_gid);
 	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
@@ -5009,6 +5014,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
 		return -EINVAL;
 
+	if (id_priv->id.qp_type != IB_QPT_UD)
+		return -EINVAL;
+
 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 	if (!mc)
 		return -ENOMEM;
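The net effect of the cma.c changes above: a qkey is either supplied explicitly (nonzero, and only once) or derived from the port space, so it can no longer be read uninitialized — the source of the KMSAN crash. A standalone model of the resulting logic, with kernel types swapped for plain C, is sketched below; it is illustrative, not the kernel code itself.

	/* Standalone model of the qkey logic after this series. RDMA_UDP_QKEY
	 * mirrors the constant in include/rdma/rdma_cm.h; "ps" stands in for
	 * the CM port space of the rdma_id_private. */
	#include <assert.h>
	#include <stdint.h>

	#define RDMA_UDP_QKEY 0x01234567

	enum ps { PS_UDP, PS_IB, PS_TCP };
	struct id_priv { enum ps ps; uint32_t qkey; };

	/* Mirrors cma_set_default_qkey(): derive the qkey from the port space. */
	static int set_default_qkey(struct id_priv *id)
	{
		switch (id->ps) {
		case PS_UDP:
		case PS_IB:
			id->qkey = RDMA_UDP_QKEY;
			return 0;
		default: /* the kernel queries the SA for other spaces */
			return -1;
		}
	}

	/* Mirrors cma_set_qkey(): only accepts an explicit, consistent qkey. */
	static int set_qkey(struct id_priv *id, uint32_t qkey)
	{
		if (!qkey || (id->qkey && id->qkey != qkey))
			return -1;
		id->qkey = qkey;
		return 0;
	}

	int main(void)
	{
		struct id_priv id = { .ps = PS_UDP };

		assert(set_qkey(&id, 0) == -1);      /* zero is rejected outright */
		assert(set_default_qkey(&id) == 0);  /* defaults to RDMA_UDP_QKEY */
		assert(set_qkey(&id, 0xdead) == -1); /* conflicting qkey rejected */
		assert(set_qkey(&id, RDMA_UDP_QKEY) == 0);
		return 0;
	}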
drivers/infiniband/core/verbs.c:

@@ -532,6 +532,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
 	else
 		ret = device->ops.create_ah(ah, &init_attr, NULL);
 	if (ret) {
+		if (ah->sgid_attr)
+			rdma_put_gid_attr(ah->sgid_attr);
 		kfree(ah);
 		return ERR_PTR(ret);
 	}
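The verbs.c fix above is an instance of a general rule: an error path must release every reference taken earlier in the function, in reverse order. A generic, runnable sketch of the pattern follows; get_ref(), put_ref() and hw_create() are hypothetical stand-ins, not kernel API.

	/* Generic acquire/rollback sketch (hypothetical helpers): everything
	 * taken before the failure point is released in reverse order, which
	 * is exactly the GID reference _rdma_create_ah was leaking. */
	#include <assert.h>
	#include <stdlib.h>

	struct ref { int dummy; };
	static int refs_held;

	static struct ref *get_ref(void)
	{
		static struct ref r;
		refs_held++;
		return &r;
	}
	static void put_ref(struct ref *r) { (void)r; refs_held--; }
	static int hw_create(struct ref *r) { (void)r; return -1; } /* simulated failure */

	struct obj { struct ref *ref; };

	static struct obj *obj_create(void)
	{
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return NULL;
		o->ref = get_ref();
		if (hw_create(o->ref))
			goto err_put; /* the fixed path: drop the ref before freeing */
		return o;

	err_put:
		put_ref(o->ref);
		free(o);
		return NULL;
	}

	int main(void)
	{
		assert(!obj_create());  /* creation fails ... */
		assert(refs_held == 0); /* ... but no reference is leaked */
		return 0;
	}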
drivers/infiniband/hw/erdma/erdma_cq.c:

@@ -65,7 +65,7 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
 	[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
 	[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
 	[ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP,
-	[ERDMA_OP_ATOMIC_FAD] = IB_WC_FETCH_ADD,
+	[ERDMA_OP_ATOMIC_FAA] = IB_WC_FETCH_ADD,
 };
 
 static const struct {
drivers/infiniband/hw/erdma/erdma_hw.h:

@@ -441,7 +441,7 @@ struct erdma_reg_mr_sqe {
 };
 
 /* EQ related. */
-#define ERDMA_DEFAULT_EQ_DEPTH 256
+#define ERDMA_DEFAULT_EQ_DEPTH 4096
 
 /* ceqe */
 #define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
@@ -491,7 +491,7 @@ enum erdma_opcode {
 	ERDMA_OP_LOCAL_INV = 15,
 	ERDMA_OP_READ_WITH_INV = 16,
 	ERDMA_OP_ATOMIC_CAS = 17,
-	ERDMA_OP_ATOMIC_FAD = 18,
+	ERDMA_OP_ATOMIC_FAA = 18,
 	ERDMA_NUM_OPCODES = 19,
 	ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
 };
drivers/infiniband/hw/erdma/erdma_main.c:

@@ -56,7 +56,7 @@ done:
 static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
 {
 	struct net_device *netdev;
-	int ret = -ENODEV;
+	int ret = -EPROBE_DEFER;
 
 	/* Already binded to a net_device, so we skip. */
 	if (dev->netdev)
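Returning -EPROBE_DEFER instead of -ENODEV matters because the driver core treats it specially: the probe is placed on a deferred list and retried when other devices (here, the paired netdevice) appear, which is what makes probing robust to boot-time ordering. A toy userspace model of that retry loop is sketched below; the names are made up for illustration, and the real logic lives in drivers/base/dd.c.

	/* Toy model of deferred probing: probes returning EPROBE_DEFER are
	 * retried whenever another device successfully binds. */
	#include <stdbool.h>
	#include <stdio.h>

	#define EPROBE_DEFER 517 /* kernel-internal errno value */

	static bool netdev_registered;

	static int netdev_probe(void) { netdev_registered = true; return 0; }
	static int erdma_like_probe(void)
	{
		return netdev_registered ? 0 : -EPROBE_DEFER; /* wait for the netdev */
	}

	int main(void)
	{
		int (*probes[])(void) = { erdma_like_probe, netdev_probe };
		bool bound[2] = { false, false };
		bool progress = true;

		/* Retry deferred probes as long as something new binds. */
		while (progress) {
			progress = false;
			for (int i = 0; i < 2; i++) {
				if (!bound[i] && probes[i]() == 0) {
					bound[i] = true;
					progress = true;
					printf("device %d bound\n", i);
				}
			}
		}
		return bound[0] && bound[1] ? 0 : 1;
	}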
drivers/infiniband/hw/erdma/erdma_qp.c:

@@ -405,7 +405,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
 			      FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
 					 mr->mem.mtt_nents);
 
-		if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
+		if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
 			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
 			/* Copy SGLs to SQE content to accelerate */
 			memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
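The `<` to `<=` change is a classic boundary fix: a memory region with exactly ERDMA_MAX_INLINE_MTT_ENTRIES page entries does fit in the WQE, but the old strict comparison pushed it through the slower indirect MTT path. A tiny demonstration of the off-by-one follows; the capacity of 4 is assumed here purely for illustration.

	/* Off-by-one demo: at exactly the inline capacity, the old check
	 * wrongly chose the indirect path. Capacity 4 is assumed. */
	#include <stdio.h>

	#define MAX_INLINE_MTT_ENTRIES 4

	int main(void)
	{
		for (int nents = 3; nents <= 5; nents++)
			printf("nents=%d old(<)=%s new(<=)=%s\n", nents,
			       nents < MAX_INLINE_MTT_ENTRIES ? "inline" : "indirect",
			       nents <= MAX_INLINE_MTT_ENTRIES ? "inline" : "indirect");
		return 0;
	}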
@@ -439,7 +439,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
 				cpu_to_le64(atomic_wr(send_wr)->compare_add);
 		} else {
 			wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
-					      ERDMA_OP_ATOMIC_FAD);
+					      ERDMA_OP_ATOMIC_FAA);
 			atomic_sqe->fetchadd_swap_data =
 				cpu_to_le64(atomic_wr(send_wr)->compare_add);
 		}
drivers/infiniband/hw/erdma/erdma_verbs.h:

@@ -11,7 +11,7 @@
 
 /* RDMA Capability. */
 #define ERDMA_MAX_PD (128 * 1024)
-#define ERDMA_MAX_SEND_WR 4096
+#define ERDMA_MAX_SEND_WR 8192
 #define ERDMA_MAX_ORD 128
 #define ERDMA_MAX_IRD 128
 #define ERDMA_MAX_SGE_RD 1
drivers/infiniband/hw/irdma/cm.c:

@@ -1458,13 +1458,15 @@ static int irdma_send_fin(struct irdma_cm_node *cm_node)
  * irdma_find_listener - find a cm node listening on this addr-port pair
  * @cm_core: cm's core
  * @dst_addr: listener ip addr
+ * @ipv4: flag indicating IPv4 when true
  * @dst_port: listener tcp port num
  * @vlan_id: virtual LAN ID
  * @listener_state: state to match with listen node's
  */
 static struct irdma_cm_listener *
-irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
-		    u16 vlan_id, enum irdma_cm_listener_state listener_state)
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
+		    u16 dst_port, u16 vlan_id,
+		    enum irdma_cm_listener_state listener_state)
 {
 	struct irdma_cm_listener *listen_node;
 	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
@@ -1477,7 +1479,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
 	list_for_each_entry (listen_node, &cm_core->listen_list, list) {
 		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
 		listen_port = listen_node->loc_port;
-		if (listen_port != dst_port ||
+		if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
 		    !(listener_state & listen_node->listener_state))
 			continue;
 		/* compare node pair, return node handle if a match */
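Why the ipv4 flag is needed: irdma keeps listener addresses as u32[4] regardless of family, and an all-zero address acts as a wildcard, so an IPv6 wildcard listener on a port could previously match an IPv4 connection to the same port. A small, self-contained model of the corrected match is sketched below; the port and address values are arbitrary.

	/* Model of the listener lookup before/after the ipv4-flag fix.
	 * Addresses are u32[4] as in irdma; all-zero means "any address". */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct listener {
		uint32_t loc_addr[4];
		uint16_t loc_port;
		bool ipv4;
	};

	static bool match(const struct listener *l, const uint32_t *addr,
			  bool ipv4, uint16_t port, bool check_family)
	{
		static const uint32_t ip_zero[4];

		if (check_family && l->ipv4 != ipv4)
			return false;
		if (l->loc_port != port)
			return false;
		/* a wildcard listener matches any address on the port */
		return !memcmp(l->loc_addr, ip_zero, sizeof(ip_zero)) ||
		       !memcmp(l->loc_addr, addr, 4 * sizeof(uint32_t));
	}

	int main(void)
	{
		struct listener v6_any = { .loc_port = 8000, .ipv4 = false };
		uint32_t v4_peer[4] = { 0xc0a8010a, 0, 0, 0 }; /* 192.168.1.10 */

		/* Old behaviour: the IPv6 wildcard steals the IPv4 connection. */
		assert(match(&v6_any, v4_peer, true, 8000, false));
		/* Fixed behaviour: the family mismatch rejects it. */
		assert(!match(&v6_any, v4_peer, true, 8000, true));
		return 0;
	}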
drivers/infiniband/hw/irdma/cm.c (continued):

@@ -2902,9 +2904,10 @@ irdma_make_listen_node(struct irdma_cm_core *cm_core,
 	unsigned long flags;
 
 	/* cannot have multiple matching listeners */
-	listener = irdma_find_listener(cm_core, cm_info->loc_addr,
-				       cm_info->loc_port, cm_info->vlan_id,
-				       IRDMA_CM_LISTENER_EITHER_STATE);
+	listener =
+		irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
+				    cm_info->loc_port, cm_info->vlan_id,
+				    IRDMA_CM_LISTENER_EITHER_STATE);
 	if (listener &&
 	    listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
 		refcount_dec(&listener->refcnt);
@@ -3153,6 +3156,7 @@ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
 
 	listener = irdma_find_listener(cm_core,
 				       cm_info.loc_addr,
+				       cm_info.ipv4,
 				       cm_info.loc_port,
 				       cm_info.vlan_id,
 				       IRDMA_CM_LISTENER_ACTIVE_STATE);
drivers/infiniband/hw/irdma/cm.h:

@@ -41,7 +41,7 @@
 #define TCP_OPTIONS_PADDING	3
 
 #define IRDMA_DEFAULT_RETRYS	64
-#define IRDMA_DEFAULT_RETRANS	8
+#define IRDMA_DEFAULT_RETRANS	32
 #define IRDMA_DEFAULT_TTL	0x40
 #define IRDMA_DEFAULT_RTT_VAR	6
 #define IRDMA_DEFAULT_SS_THRESH	0x3fffffff
drivers/infiniband/hw/irdma/hw.c:

@@ -41,6 +41,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
 	IRDMA_HMC_IW_XFFL,
 	IRDMA_HMC_IW_Q1,
 	IRDMA_HMC_IW_Q1FL,
+	IRDMA_HMC_IW_PBLE,
 	IRDMA_HMC_IW_TIMER,
 	IRDMA_HMC_IW_FSIMC,
 	IRDMA_HMC_IW_FSIAV,
@@ -827,6 +828,8 @@ static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
 	info.entry_type = rf->sd_type;
 
 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
+			continue;
 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
 			info.rsrc_type = iw_hmc_obj_types[i];
 			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
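The shape of this leak fix: PBLE objects are created on demand rather than in the common create loop, but teardown walks the same type table, so the type must appear in the table (and merely be skipped at create time) for its backing memory to be freed. A minimal model of that create/destroy asymmetry follows; the type names are illustrative only.

	/* Model of the PBLE leak fix: on-demand objects still need an entry
	 * in the shared type table so the destroy pass covers them. */
	#include <stdio.h>

	enum { Q1, TIMER, PBLE, NTYPES };
	static const char *names[] = { "Q1", "TIMER", "PBLE" };
	static int allocated[NTYPES];

	int main(void)
	{
		/* create: PBLE is skipped here and allocated later, on demand */
		for (int i = 0; i < NTYPES; i++) {
			if (i == PBLE)
				continue;
			allocated[i] = 1;
		}
		allocated[PBLE] = 1; /* on-demand PBLE allocation */

		/* destroy walks the full table; without PBLE listed, it leaked */
		for (int i = 0; i < NTYPES; i++)
			if (allocated[i]) {
				allocated[i] = 0;
				printf("freed %s\n", names[i]);
			}
		return 0;
	}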
drivers/infiniband/hw/irdma/utils.c:

@@ -2595,7 +2595,10 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 		/* remove the SQ WR by moving SQ tail*/
 		IRDMA_RING_SET_TAIL(*sq_ring,
 			sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
-
+		if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
+			kfree(cmpl);
+			continue;
+		}
 		ibdev_dbg(iwqp->iwscq->ibcq.device,
 			  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
 			  __func__, cmpl->cpi.wr_id, qp->qp_id);
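The flush fix distinguishes work requests the consumer posted from driver-generated NOPs: both must advance the SQ tail during a flush, but only the former may surface as software completions. A compact model of that drain loop follows; the ring contents are invented for the demonstration.

	/* Model of the flush-completion fix: every stale SQ slot advances the
	 * tail, but NOP entries are freed instead of reported to the CQ. */
	#include <stdio.h>

	enum op { OP_SEND, OP_NOP, OP_WRITE };

	int main(void)
	{
		enum op sq[] = { OP_SEND, OP_NOP, OP_NOP, OP_WRITE };
		int tail = 0, reported = 0;

		while (tail < 4) {
			enum op o = sq[tail++]; /* tail always moves (unchanged) */
			if (o == OP_NOP)
				continue;       /* the fix: no SW completion for NOPs */
			printf("flush completion for wr at slot %d\n", tail - 1);
			reported++;
		}
		printf("%d completions, 4 slots drained\n", reported);
		return 0;
	}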
drivers/infiniband/hw/mlx5/main.c:

@@ -442,6 +442,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
 		*active_width = IB_WIDTH_2X;
 		*active_speed = IB_SPEED_NDR;
 		break;
+	case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
+		*active_width = IB_WIDTH_8X;
+		*active_speed = IB_SPEED_HDR;
+		break;
 	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
 		*active_width = IB_WIDTH_4X;
 		*active_speed = IB_SPEED_NDR;
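The fix reports an IB speed/width pair whose product equals the Ethernet rate: 400GAUI-8 runs eight lanes at 50 Gb/s each (HDR x 8), while 400GAUI-4 runs four lanes at 100 Gb/s each (NDR x 4); previously the 8-lane mode fell through to a default and was reported incorrectly. The arithmetic, with per-lane rates following the IBTA naming:

	/* Check that both 400G encodings multiply out to 400 Gb/s.
	 * Per-lane rates: HDR = 50 Gb/s, NDR = 100 Gb/s. */
	#include <assert.h>

	int main(void)
	{
		int hdr = 50, ndr = 100; /* Gb/s per lane */

		assert(8 * hdr == 400); /* 400GAUI-8 -> IB_WIDTH_8X + IB_SPEED_HDR */
		assert(4 * ndr == 400); /* 400GAUI-4 -> IB_WIDTH_4X + IB_SPEED_NDR */
		return 0;
	}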