diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile index 2a334800f109..228be05fbaf8 100644 --- a/drivers/infiniband/hw/mlx5/Makefile +++ b/drivers/infiniband/hw/mlx5/Makefile @@ -1,11 +1,25 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o +obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o + +mlx5_ib-y := ah.o \ + cmd.o \ + cong.o \ + cq.o \ + doorbell.o \ + gsi.o \ + ib_virt.o \ + mad.o \ + main.o \ + mem.o \ + mr.o \ + qp.o \ + qpc.o \ + restrack.o \ + srq.o \ + srq_cmd.o -mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \ - srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \ - cong.o restrack.o mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o -mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o -mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o -mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o +mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \ + flow.o \ + qos.o diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 4c26492ab8a3..a2fcbc49131e 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -327,23 +327,6 @@ int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id, - u16 uid) -{ - u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0}; - int err; - - MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); - MLX5_SET(alloc_q_counter_in, in, uid, uid); - - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (!err) - *counter_id = MLX5_GET(alloc_q_counter_out, out, - counter_set_id); - return err; -} - int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port) { diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index 945ebce73613..43079b18d9b4 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -61,8 +61,6 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn, u16 uid); int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid); int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid); -int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id, - u16 uid); int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); #endif /* MLX5_IB_CMD_H */ diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 146ba2966744..0c18cb6a2f14 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -36,6 +36,7 @@ #include #include "mlx5_ib.h" #include "srq.h" +#include "qp.h" static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe) { @@ -201,7 +202,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, case MLX5_CQE_RESP_WR_IMM: wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = cqe->imm_inval_pkey; + wc->ex.imm_data = cqe->immediate; break; case MLX5_CQE_RESP_SEND: wc->opcode = IB_WC_RECV; @@ -213,12 +214,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, case MLX5_CQE_RESP_SEND_IMM: wc->opcode = IB_WC_RECV; wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = cqe->imm_inval_pkey; + wc->ex.imm_data = cqe->immediate; break; case 
MLX5_CQE_RESP_SEND_INV: wc->opcode = IB_WC_RECV; wc->wc_flags = IB_WC_WITH_INVALIDATE; - wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); + wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey); break; } wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; @@ -226,7 +227,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; wc->wc_flags |= g ? IB_WC_GRH : 0; if (unlikely(is_qp1(qp->ibqp.qp_type))) { - u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; + u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff; ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey, &wc->pkey_index); @@ -484,7 +485,7 @@ repoll: * because CQs will be locked while QPs are removed * from the table. */ - mqp = __mlx5_qp_lookup(dev->mdev, qpn); + mqp = radix_tree_lookup(&dev->qp_table.tree, qpn); *cur_qp = to_mibqp(mqp); } diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 46e1ab771f10..35b98c2d64d5 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -14,6 +14,7 @@ #include #include #include "mlx5_ib.h" +#include "qp.h" #include #define UVERBS_MODULE_NAME mlx5_ib @@ -1356,7 +1357,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, } if (obj->flags & DEVX_OBJ_FLAGS_DCT) - ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct); + ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); else if (obj->flags & DEVX_OBJ_FLAGS_CQ) ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); else @@ -1450,9 +1451,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( if (opcode == MLX5_CMD_OP_CREATE_DCT) { obj->flags |= DEVX_OBJ_FLAGS_DCT; - err = mlx5_core_create_dct(dev->mdev, &obj->core_dct, - cmd_in, cmd_in_len, - cmd_out, cmd_out_len); + err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in, + cmd_in_len, cmd_out, cmd_out_len); } else if (opcode == MLX5_CMD_OP_CREATE_CQ) { obj->flags |= DEVX_OBJ_FLAGS_CQ; obj->core_cq.comp = devx_cq_comp; @@ -1499,7 +1499,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( obj_destroy: if (obj->flags & DEVX_OBJ_FLAGS_DCT) - mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct); + mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); else if (obj->flags & DEVX_OBJ_FLAGS_CQ) mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); else diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index 862b7bf3e646..69cb7e6e8955 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -427,7 +427,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)( num_actions = uverbs_attr_ptr_get_array_size( attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM, - MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)); + MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)); if (num_actions < 0) return num_actions; @@ -648,7 +648,7 @@ DECLARE_UVERBS_NAMED_METHOD( UA_MANDATORY), UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM, UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES( - set_action_in_add_action_in_auto)), + set_add_copy_action_in_auto)), UA_MANDATORY, UA_ALLOC_AND_COPY), UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE, diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c index b61165359954..46b2d370fb3f 100644 --- a/drivers/infiniband/hw/mlx5/ib_virt.c +++ b/drivers/infiniband/hw/mlx5/ib_virt.c @@ -134,7 +134,7 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, if (!out) return 
-ENOMEM; - err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz); + err = mlx5_core_query_vport_counter(mdev, true, vf, port, out); if (err) goto ex; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 14e0c17de6a9..454ce5de2de7 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -30,7 +30,6 @@ * SOFTWARE. */ -#include #include #include #include @@ -188,8 +187,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num, goto done; } - err = mlx5_core_query_vport_counter(mdev, 0, 0, - mdev_port_num, out_cnt, sz); + err = mlx5_core_query_vport_counter(mdev, 0, 0, mdev_port_num, + out_cnt); if (!err) pma_cnt_ext_assign(pma_cnt_ext, out_cnt); } else { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 6679756506e6..65e0e24d463b 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -59,6 +59,7 @@ #include "ib_rep.h" #include "cmd.h" #include "srq.h" +#include "qp.h" #include #include #include @@ -2443,7 +2444,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx, act_size = roundup_pow_of_two(act_size); dm->size = act_size; - err = mlx5_dm_sw_icm_alloc(dev, type, act_size, + err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment, to_mucontext(ctx)->devx_uid, &dm->dev_addr, &dm->icm_dm.obj_id); if (err) @@ -4632,8 +4633,7 @@ static void delay_drop_handler(struct work_struct *work) atomic_inc(&delay_drop->events_cnt); mutex_lock(&delay_drop->lock); - err = mlx5_core_set_delay_drop(delay_drop->dev->mdev, - delay_drop->timeout); + err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout); if (err) { mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n", delay_drop->timeout); @@ -5439,15 +5439,21 @@ static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev) static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) { + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; int num_cnt_ports; int i; num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; + MLX5_SET(dealloc_q_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_Q_COUNTER); + for (i = 0; i < num_cnt_ports; i++) { - if (dev->port[i].cnts.set_id_valid) - mlx5_core_dealloc_q_counter(dev->mdev, - dev->port[i].cnts.set_id); + if (dev->port[i].cnts.set_id) { + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, + dev->port[i].cnts.set_id); + mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in); + } kfree(dev->port[i].cnts.names); kfree(dev->port[i].cnts.offsets); } @@ -5556,11 +5562,14 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) { + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; int num_cnt_ports; int err = 0; int i; bool is_shared; + MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; @@ -5572,17 +5581,19 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) mlx5_ib_fill_counters(dev, dev->port[i].cnts.names, dev->port[i].cnts.offsets); - err = mlx5_cmd_alloc_q_counter(dev->mdev, - &dev->port[i].cnts.set_id, - is_shared ? - MLX5_SHARED_RESOURCE_UID : 0); + MLX5_SET(alloc_q_counter_in, in, uid, + is_shared ? 
MLX5_SHARED_RESOURCE_UID : 0); + + err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out); if (err) { mlx5_ib_warn(dev, "couldn't allocate queue counter for port %d, err %d\n", i + 1, err); goto err_alloc; } - dev->port[i].cnts.set_id_valid = true; + + dev->port[i].cnts.set_id = + MLX5_GET(alloc_q_counter_out, out, counter_set_id); } return 0; @@ -5638,27 +5649,23 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, struct rdma_hw_stats *stats, u16 set_id) { - int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); - void *out; + u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; __be32 val; int ret, i; - out = kvzalloc(outlen, GFP_KERNEL); - if (!out) - return -ENOMEM; - - ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen); + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); + MLX5_SET(query_q_counter_in, in, counter_set_id, set_id); + ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out); if (ret) - goto free; + return ret; for (i = 0; i < cnts->num_q_counters; i++) { - val = *(__be32 *)(out + cnts->offsets[i]); + val = *(__be32 *)((void *)out + cnts->offsets[i]); stats->value[i] = (u64)be32_to_cpu(val); } -free: - kvfree(out); - return ret; + return 0; } static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, @@ -5765,20 +5772,38 @@ static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) counter->stats, counter->id); } +static int mlx5_ib_counter_dealloc(struct rdma_counter *counter) +{ + struct mlx5_ib_dev *dev = to_mdev(counter->device); + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; + + if (!counter->id) + return 0; + + MLX5_SET(dealloc_q_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_Q_COUNTER); + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id); + return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in); +} + static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, struct ib_qp *qp) { struct mlx5_ib_dev *dev = to_mdev(qp->device); - u16 cnt_set_id = 0; int err; if (!counter->id) { - err = mlx5_cmd_alloc_q_counter(dev->mdev, - &cnt_set_id, - MLX5_SHARED_RESOURCE_UID); + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; + + MLX5_SET(alloc_q_counter_in, in, opcode, + MLX5_CMD_OP_ALLOC_Q_COUNTER); + MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID); + err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out); if (err) return err; - counter->id = cnt_set_id; + counter->id = + MLX5_GET(alloc_q_counter_out, out, counter_set_id); } err = mlx5_ib_qp_set_counter(qp, counter); @@ -5788,7 +5813,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, return 0; fail_set_counter: - mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id); + mlx5_ib_counter_dealloc(counter); counter->id = 0; return err; @@ -5799,13 +5824,6 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp) return mlx5_ib_qp_set_counter(qp, NULL); } -static int mlx5_ib_counter_dealloc(struct rdma_counter *counter) -{ - struct mlx5_ib_dev *dev = to_mdev(counter->device); - - return mlx5_core_dealloc_q_counter(dev->mdev, counter->id); -} - static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, enum rdma_netdev_t type, struct rdma_netdev_alloc_params *params) @@ -7175,6 +7193,9 @@ static const struct mlx5_ib_profile pf_profile = { STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_stage_roce_init, mlx5_ib_stage_roce_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_QP, + mlx5_init_qp_table, + 
mlx5_cleanup_qp_table), STAGE_CREATE(MLX5_IB_STAGE_SRQ, mlx5_init_srq_table, mlx5_cleanup_srq_table), @@ -7232,6 +7253,9 @@ const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_stage_raw_eth_roce_init, mlx5_ib_stage_raw_eth_roce_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_QP, + mlx5_init_qp_table, + mlx5_cleanup_qp_table), STAGE_CREATE(MLX5_IB_STAGE_SRQ, mlx5_init_srq_table, mlx5_cleanup_srq_table), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index a4e522385de0..aaabb8a98eed 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -780,7 +780,6 @@ struct mlx5_ib_counters { u32 num_cong_counters; u32 num_ext_ppcnt_counters; u16 set_id; - bool set_id_valid; }; struct mlx5_ib_multiport_info; @@ -870,6 +869,7 @@ enum mlx5_ib_stages { MLX5_IB_STAGE_CAPS, MLX5_IB_STAGE_NON_DEFAULT_CB, MLX5_IB_STAGE_ROCE, + MLX5_IB_STAGE_QP, MLX5_IB_STAGE_SRQ, MLX5_IB_STAGE_DEVICE_RESOURCES, MLX5_IB_STAGE_DEVICE_NOTIFIER, @@ -1065,6 +1065,7 @@ struct mlx5_ib_dev { struct mlx5_dm dm; u16 devx_whitelist_uid; struct mlx5_srq_table srq_table; + struct mlx5_qp_table qp_table; struct mlx5_async_ctx async_ctx; struct mlx5_devx_event_table devx_event_table; struct mlx5_var_table var_table; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 3de7606d4a1a..16af1105cfcf 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -36,6 +36,7 @@ #include "mlx5_ib.h" #include "cmd.h" +#include "qp.h" #include @@ -1219,7 +1220,7 @@ static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev, case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE: case MLX5_WQE_PF_TYPE_RESP: case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC: - common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP); + common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP); break; default: break; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 1456db4b6295..af599c8b88aa 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -39,6 +39,7 @@ #include "mlx5_ib.h" #include "ib_rep.h" #include "cmd.h" +#include "qp.h" /* not supported currently */ static int wq_signature; @@ -1254,7 +1255,7 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq, u32 tdn, struct ib_pd *pd) { - u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid); @@ -1262,7 +1263,7 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, if (qp->flags & MLX5_IB_QP_UNDERLAY) MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); - return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn); + return mlx5_core_create_tis(dev->mdev, in, &sq->tisn); } static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev, @@ -1336,7 +1337,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0); - err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp); + err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp); kvfree(in); @@ -1356,7 +1357,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq) { destroy_flow_rule_vport_sq(sq); - mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp); + mlx5_core_destroy_sq_tracked(dev, 
&sq->base.mqp); ib_umem_release(sq->ubuffer.umem); } @@ -1426,7 +1427,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas); memcpy(pas, qp_pas, rq_pas_size); - err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp); + err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp); kvfree(in); @@ -1436,7 +1437,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq) { - mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp); + mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp); } static bool tunnel_offload_supported(struct mlx5_core_dev *dev) @@ -1459,9 +1460,8 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, u32 tdn, - u32 *qp_flags_en, - struct ib_pd *pd, - u32 *out, int outlen) + u32 *qp_flags_en, struct ib_pd *pd, + u32 *out) { u8 lb_flag = 0; u32 *in; @@ -1494,9 +1494,8 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, } MLX5_SET(tirc, tirc, self_lb_block, lb_flag); - - err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen); - + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); + err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); rq->tirn = MLX5_GET(create_tir_out, out, tirn); if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { err = mlx5_ib_enable_lb(dev, false, true); @@ -1556,9 +1555,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (err) goto err_destroy_sq; - err = create_raw_packet_qp_tir( - dev, rq, tdn, &qp->flags_en, pd, out, - MLX5_ST_SZ_BYTES(create_tir_out)); + err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd, + out); if (err) goto err_destroy_rq; @@ -1853,7 +1851,8 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields); create_tir: - err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen); + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); + err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn); if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { @@ -2347,7 +2346,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata, &resp); } else { - err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); + err = mlx5_core_create_qp(dev, &base->mqp, in, inlen); } if (err) { @@ -2513,8 +2512,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp->state != IB_QPS_RESET) { if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET && !(qp->flags & MLX5_IB_QP_UNDERLAY)) { - err = mlx5_core_qp_modify(dev->mdev, - MLX5_CMD_OP_2RST_QP, 0, + err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, NULL, &base->mqp); } else { struct mlx5_modify_raw_qp_param raw_qp_param = { @@ -2555,7 +2553,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, qp->flags & MLX5_IB_QP_UNDERLAY) { destroy_raw_packet_qp(dev, qp); } else { - err = mlx5_core_destroy_qp(dev->mdev, &base->mqp); + err = mlx5_core_destroy_qp(dev, &base->mqp); if (err) mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", base->mqp.qpn); @@ -2818,7 +2816,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp) if (mqp->state == IB_QPS_RTR) { int err; - err = 
mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct); + err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct); if (err) { mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err); return err; @@ -2933,7 +2931,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1)); - err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen); + err = mlx5_core_modify_tis(dev, sq->tisn, in); kvfree(in); @@ -2960,7 +2958,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity); - err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen); + err = mlx5_core_modify_tis(dev, sq->tisn, in); kvfree(in); @@ -3240,7 +3238,7 @@ static int modify_raw_packet_qp_rq( "RAW PACKET QP counters are not supported on current FW\n"); } - err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen); + err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in); if (err) goto out; @@ -3303,7 +3301,7 @@ static int modify_raw_packet_qp_sq( MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); } - err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen); + err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in); if (err) { /* Remove new rate from table if failed */ if (new_rate_added) @@ -3462,10 +3460,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, base = &mqp->trans_qp.base; context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff); context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24); - return mlx5_core_qp_modify(dev->mdev, - MLX5_CMD_OP_RTS2RTS_QP, - MLX5_QP_OPTPAR_COUNTER_SET_ID, - &context, &base->mqp); + return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RTS2RTS_QP, + MLX5_QP_OPTPAR_COUNTER_SET_ID, &context, + &base->mqp); } static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, @@ -3752,8 +3749,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity); } else { - err = mlx5_core_qp_modify(dev->mdev, op, optpar, context, - &base->mqp); + err = mlx5_core_qp_modify(dev, op, optpar, context, &base->mqp); } if (err) @@ -3927,7 +3923,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index); MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); - err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in, + err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in, MLX5_ST_SZ_BYTES(create_dct_in), out, sizeof(out)); if (err) @@ -3935,7 +3931,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, resp.dctn = qp->dct.mdct.mqp.qpn; err = ib_copy_to_udata(udata, &resp, resp.response_length); if (err) { - mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct); + mlx5_core_destroy_dct(dev, &qp->dct.mdct); return err; } } else { @@ -5697,8 +5693,7 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (!outb) return -ENOMEM; - err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, - outlen); + err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen); if (err) goto out; @@ -5776,7 +5771,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, if (!out) return -ENOMEM; - err = mlx5_core_dct_query(dev->mdev, dct, out, outlen); + err = mlx5_core_dct_query(dev, dct, out, outlen); if (err) goto out; @@ -5962,7 +5957,7 @@ static int set_delay_drop(struct mlx5_ib_dev *dev) if 
(dev->delay_drop.activate) goto out; - err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout); + err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout); if (err) goto out; @@ -6068,13 +6063,13 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, } rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0); - err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp); + err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp); if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) { err = set_delay_drop(dev); if (err) { mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n", err); - mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); + mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); } else { rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP; } @@ -6256,7 +6251,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, return &rwq->ibwq; err_copy: - mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); + mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); err_user_rq: destroy_user_rq(dev, pd, rwq, udata); err: @@ -6269,7 +6264,7 @@ void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata) struct mlx5_ib_dev *dev = to_mdev(wq->device); struct mlx5_ib_rwq *rwq = to_mrwq(wq); - mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); + mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); destroy_user_rq(dev, wq->pd, rwq, udata); kfree(rwq); } @@ -6447,7 +6442,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, "Receive WQ counters are not supported on current FW\n"); } - err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen); + err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in); if (!err) rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state; diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h new file mode 100644 index 000000000000..ad9d76e3e18a --- /dev/null +++ b/drivers/infiniband/hw/mlx5/qp.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* + * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved. 
+ */ + +#ifndef _MLX5_IB_QP_H +#define _MLX5_IB_QP_H + +#include "mlx5_ib.h" + +int mlx5_init_qp_table(struct mlx5_ib_dev *dev); +void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev); + +int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp, + u32 *in, int inlen, u32 *out, int outlen); +int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp, + u32 *in, int inlen); +int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask, + void *qpc, struct mlx5_core_qp *qp); +int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp); +int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct); +int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp, + u32 *out, int outlen); +int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct, + u32 *out, int outlen); + +int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec); + +void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, + struct mlx5_core_qp *rq); +int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *sq); +void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev, + struct mlx5_core_qp *sq); + +int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *rq); + +struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev, + int res_num, + enum mlx5_res_type res_type); +void mlx5_core_res_put(struct mlx5_core_rsc_common *res); + +int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn); +int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn); +#endif /* _MLX5_IB_QP_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/infiniband/hw/mlx5/qpc.c similarity index 55% rename from drivers/net/ethernet/mellanox/mlx5/core/qp.c rename to drivers/infiniband/hw/mlx5/qpc.c index c3aea4cc2fff..ea62735042f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -1,46 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved. */ #include -#include -#include #include #include -#include +#include "mlx5_ib.h" +#include "qp.h" -#include "mlx5_core.h" -#include "lib/eq.h" - -static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, +static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct); static struct mlx5_core_rsc_common * @@ -124,11 +93,9 @@ static int rsc_event_notifier(struct notifier_block *nb, { struct mlx5_core_rsc_common *common; struct mlx5_qp_table *table; - struct mlx5_core_dev *dev; struct mlx5_core_dct *dct; u8 event_type = (u8)type; struct mlx5_core_qp *qp; - struct mlx5_priv *priv; struct mlx5_eqe *eqe; u32 rsn; @@ -155,22 +122,12 @@ static int rsc_event_notifier(struct notifier_block *nb, } table = container_of(nb, struct mlx5_qp_table, nb); - priv = container_of(table, struct mlx5_priv, qp_table); - dev = container_of(priv, struct mlx5_core_dev, priv); - - mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn); - common = mlx5_get_rsc(table, rsn); - if (!common) { - mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn); + if (!common) return NOTIFY_OK; - } - if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) { - mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n", - event_type, rsn); + if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) goto out; - } switch (common->res) { case MLX5_RES_QP: @@ -185,7 +142,7 @@ static int rsc_event_notifier(struct notifier_block *nb, complete(&dct->drained); break; default: - mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn); + break; } out: mlx5_core_put_rsc(common); @@ -193,11 +150,10 @@ out: return NOTIFY_OK; } -static int create_resource_common(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp, - int rsc_type) +static int create_resource_common(struct mlx5_ib_dev *dev, + struct mlx5_core_qp *qp, int rsc_type) { - struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_qp_table *table = &dev->qp_table; int err; qp->common.res = rsc_type; @@ -216,10 +172,10 @@ static int create_resource_common(struct mlx5_core_dev *dev, return 0; } -static void destroy_resource_common(struct mlx5_core_dev *dev, +static void destroy_resource_common(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) { - struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_qp_table *table = &dev->qp_table; unsigned long flags; spin_lock_irqsave(&table->lock, flags); @@ -230,24 +186,19 @@ static void destroy_resource_common(struct mlx5_core_dev *dev, wait_for_completion(&qp->common.free); } -static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev, +static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct, bool need_cleanup) { - u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {}; struct mlx5_core_qp *qp = &dct->mqp; int err; err = mlx5_core_drain_dct(dev, dct); if (err) { - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto destroy; - } else { - mlx5_core_warn( - dev, "failed drain DCT 0x%x with error 0x%x\n", - qp->qpn, err); - return err; - } + + return err; } wait_for_completion(&dct->drained); destroy: @@ -256,15 +207,12 @@ destroy: MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT); MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); MLX5_SET(destroy_dct_in, in, uid, qp->uid); - err = 
mlx5_cmd_exec(dev, (void *)&in, sizeof(in), - (void *)&out, sizeof(out)); + err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in); return err; } -int mlx5_core_create_dct(struct mlx5_core_dev *dev, - struct mlx5_core_dct *dct, - u32 *in, int inlen, - u32 *out, int outlen) +int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct, + u32 *in, int inlen, u32 *out, int outlen) { struct mlx5_core_qp *qp = &dct->mqp; int err; @@ -272,11 +220,9 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev, init_completion(&dct->drained); MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT); - err = mlx5_cmd_exec(dev, in, inlen, out, outlen); - if (err) { - mlx5_core_warn(dev, "create DCT failed, ret %d\n", err); + err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen); + if (err) return err; - } qp->qpn = MLX5_GET(create_dct_out, out, dctn); qp->uid = MLX5_GET(create_dct_in, in, uid); @@ -289,108 +235,83 @@ err_cmd: _mlx5_core_destroy_dct(dev, dct, false); return err; } -EXPORT_SYMBOL_GPL(mlx5_core_create_dct); -int mlx5_core_create_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp, +int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0}; - u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)]; - u32 din[MLX5_ST_SZ_DW(destroy_qp_in)]; + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; + u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {}; int err; MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out)); if (err) return err; qp->uid = MLX5_GET(create_qp_in, in, uid); qp->qpn = MLX5_GET(create_qp_out, out, qpn); - mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); err = create_resource_common(dev, qp, MLX5_RES_QP); if (err) goto err_cmd; - err = mlx5_debug_qp_add(dev, qp); - if (err) - mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", - qp->qpn); - - atomic_inc(&dev->num_qps); + mlx5_debug_qp_add(dev->mdev, qp); return 0; err_cmd: - memset(din, 0, sizeof(din)); - memset(dout, 0, sizeof(dout)); MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); MLX5_SET(destroy_qp_in, din, uid, qp->uid); - mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); + mlx5_cmd_exec_in(dev->mdev, destroy_qp, din); return err; } -EXPORT_SYMBOL_GPL(mlx5_core_create_qp); -static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, +static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct) { - u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {}; struct mlx5_core_qp *qp = &dct->mqp; MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT); MLX5_SET(drain_dct_in, in, dctn, qp->qpn); MLX5_SET(drain_dct_in, in, uid, qp->uid); - return mlx5_cmd_exec(dev, (void *)&in, sizeof(in), - (void *)&out, sizeof(out)); + return mlx5_cmd_exec_in(dev->mdev, drain_dct, in); } -int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, +int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct) { return _mlx5_core_destroy_dct(dev, dct, true); } -EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct); -int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp) +int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) { - u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0}; - int err; + u32 
in[MLX5_ST_SZ_DW(destroy_qp_in)] = {}; - mlx5_debug_qp_remove(dev, qp); + mlx5_debug_qp_remove(dev->mdev, qp); destroy_resource_common(dev, qp); MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); MLX5_SET(destroy_qp_in, in, uid, qp->uid); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (err) - return err; - - atomic_dec(&dev->num_qps); + mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); return 0; } -EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); -int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, +int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec) { - u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {}; MLX5_SET(set_delay_drop_params_in, in, opcode, MLX5_CMD_OP_SET_DELAY_DROP_PARAMS); MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout, timeout_usec / 100); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in); } -EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop); struct mbox_info { u32 *in; @@ -496,120 +417,112 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, opt_param_mask, qpc, uid); break; default: - mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n", - opcode, qpn); return -EINVAL; } return 0; } -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, - u32 opt_param_mask, void *qpc, - struct mlx5_core_qp *qp) +int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask, + void *qpc, struct mlx5_core_qp *qp) { struct mbox_info mbox; int err; - err = modify_qp_mbox_alloc(dev, opcode, qp->qpn, + err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask, qpc, &mbox, qp->uid); if (err) return err; - err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen); + err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out, + mbox.outlen); mbox_free(&mbox); return err; } -EXPORT_SYMBOL_GPL(mlx5_core_qp_modify); -void mlx5_init_qp_table(struct mlx5_core_dev *dev) +int mlx5_init_qp_table(struct mlx5_ib_dev *dev) { - struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_qp_table *table = &dev->qp_table; - memset(table, 0, sizeof(*table)); spin_lock_init(&table->lock); INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); - mlx5_qp_debugfs_init(dev); + mlx5_qp_debugfs_init(dev->mdev); table->nb.notifier_call = rsc_event_notifier; - mlx5_notifier_register(dev, &table->nb); + mlx5_notifier_register(dev->mdev, &table->nb); + + return 0; } -void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) +void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev) { - struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_qp_table *table = &dev->qp_table; - mlx5_notifier_unregister(dev, &table->nb); - mlx5_qp_debugfs_cleanup(dev); + mlx5_notifier_unregister(dev->mdev, &table->nb); + mlx5_qp_debugfs_cleanup(dev->mdev); } -int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, +int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {}; MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP); MLX5_SET(query_qp_in, in, qpn, qp->qpn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen); } 
-EXPORT_SYMBOL_GPL(mlx5_core_qp_query); -int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, +int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {}; struct mlx5_core_qp *qp = &dct->mqp; MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT); MLX5_SET(query_dct_in, in, dctn, qp->qpn); - return mlx5_cmd_exec(dev, (void *)&in, sizeof(in), - (void *)out, outlen); + return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out, + outlen); } -EXPORT_SYMBOL_GPL(mlx5_core_dct_query); -int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) +int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn) { - u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {}; int err; MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out); if (!err) *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd); return err; } -EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); -int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) +int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn) { - u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {}; MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD); MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in); } -EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); -static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid) +static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) { - u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {}; - u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {}; + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {}; MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); MLX5_SET(destroy_rq_in, in, uid, uid); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); } -int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, +int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq) { int err; u32 rqn; - err = mlx5_core_create_rq(dev, in, inlen, &rqn); + err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn); if (err) return err; @@ -626,39 +539,37 @@ err_destroy_rq: return err; } -EXPORT_SYMBOL(mlx5_core_create_rq_tracked); -void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, +void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *rq) { destroy_resource_common(dev, rq); destroy_rq_tracked(dev, rq->qpn, rq->uid); } -EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked); -static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid) +static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid) { - u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {}; - u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {}; + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {}; MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); MLX5_SET(destroy_sq_in, in, sqn, sqn); MLX5_SET(destroy_sq_in, in, uid, uid); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + 
mlx5_cmd_exec_in(dev->mdev, destroy_sq, in); } -int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, +int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *sq) { + u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {}; int err; - u32 sqn; - err = mlx5_core_create_sq(dev, in, inlen, &sqn); + MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); + err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out)); if (err) return err; + sq->qpn = MLX5_GET(create_sq_out, out, sqn); sq->uid = MLX5_GET(create_sq_in, in, uid); - sq->qpn = sqn; err = create_resource_common(dev, sq, MLX5_RES_SQ); if (err) goto err_destroy_sq; @@ -670,68 +581,25 @@ err_destroy_sq: return err; } -EXPORT_SYMBOL(mlx5_core_create_sq_tracked); -void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, +void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *sq) { destroy_resource_common(dev, sq); destroy_sq_tracked(dev, sq->qpn, sq->uid); } -EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); -int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) -{ - u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0}; - int err; - - MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (!err) - *counter_id = MLX5_GET(alloc_q_counter_out, out, - counter_set_id); - return err; -} -EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter); - -int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) -{ - u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0}; - - MLX5_SET(dealloc_q_counter_in, in, opcode, - MLX5_CMD_OP_DEALLOC_Q_COUNTER); - MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} -EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter); - -int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, - int reset, void *out, int out_size) -{ - u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0}; - - MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); - MLX5_SET(query_q_counter_in, in, clear, reset); - MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id); - return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); -} -EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter); - -struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, +struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev, int res_num, enum mlx5_res_type res_type) { u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN); - struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_qp_table *table = &dev->qp_table; return mlx5_get_rsc(table, rsn); } -EXPORT_SYMBOL_GPL(mlx5_core_res_hold); void mlx5_core_res_put(struct mlx5_core_rsc_common *res) { mlx5_core_put_rsc(res); } -EXPORT_SYMBOL_GPL(mlx5_core_res_put); diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c index 8fc3630a9d4c..c851570791af 100644 --- a/drivers/infiniband/hw/mlx5/srq_cmd.c +++ b/drivers/infiniband/hw/mlx5/srq_cmd.c @@ -5,9 +5,9 @@ #include #include -#include #include "mlx5_ib.h" #include "srq.h" +#include "qp.h" static int get_pas_size(struct mlx5_srq_attr *in) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 6d32915000fc..d3c7dbd7f1d5 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o # mlx5 core basic # mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ - health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \ + health.o mcg.o cq.o alloc.o port.o mr.o pd.o \ transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \ fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 818edc63e428..8379b24cb838 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include "mlx5_core.h" @@ -91,8 +90,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen) { int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); - u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; - u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; + u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {}; struct mlx5_eq_comp *eq; int err; @@ -142,20 +140,17 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, err_cq_add: mlx5_eq_del_cq(&eq->core, cq); err_cmd: - memset(din, 0, sizeof(din)); - memset(dout, 0, sizeof(dout)); MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, din, cqn, cq->cqn); MLX5_SET(destroy_cq_in, din, uid, cq->uid); - mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); + mlx5_cmd_exec_in(dev, destroy_cq, din); return err; } EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) { - u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {}; int err; mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); @@ -164,7 +159,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); MLX5_SET(destroy_cq_in, in, uid, cq->uid); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_in(dev, destroy_cq, in); if (err) return err; @@ -179,20 +174,20 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) EXPORT_SYMBOL(mlx5_core_destroy_cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *out, int outlen) + u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {}; MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ); MLX5_SET(query_cq_in, in, cqn, cq->cqn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec_inout(dev, query_cq, in, out); } EXPORT_SYMBOL(mlx5_core_query_cq); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {}; MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ); MLX5_SET(modify_cq_in, in, uid, cq->uid); @@ -205,7 +200,7 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, u16 cq_period, u16 cq_max_count) { - u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {}; void *cqc; MLX5_SET(modify_cq_in, in, cqn, cq->cqn); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 04854e5fbcd7..6409090b3ec5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -101,15 +101,15 @@ void mlx5_unregister_debugfs(void) void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) { - atomic_set(&dev->num_qps, 0); - dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); } +EXPORT_SYMBOL(mlx5_qp_debugfs_init); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) { debugfs_remove_recursive(dev->priv.qp_debugfs); } +EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup); void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) { @@ -202,42 +202,37 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int index, int *is_str) { - int outlen = MLX5_ST_SZ_BYTES(query_qp_out); - struct mlx5_qp_context *ctx; + u32 out[MLX5_ST_SZ_BYTES(query_qp_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {}; u64 param = 0; - u32 *out; + int state; + u32 *qpc; int err; - int no_sq; - out = kzalloc(outlen, GFP_KERNEL); - if (!out) - return param; - - err = mlx5_core_qp_query(dev, qp, out, outlen); - if (err) { - mlx5_core_warn(dev, "failed to query qp err=%d\n", err); - goto out; - } + MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP); + MLX5_SET(query_qp_in, in, qpn, qp->qpn); + err = mlx5_cmd_exec_inout(dev, query_qp, in, out); + if (err) + return 0; *is_str = 0; - /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ - ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc); - + qpc = MLX5_ADDR_OF(query_qp_out, out, qpc); switch (index) { case QP_PID: param = qp->pid; break; case QP_STATE: - param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28); + state = MLX5_GET(qpc, qpc, state); + param = (unsigned long)mlx5_qp_state_str(state); *is_str = 1; break; case QP_XPORT: - param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff); + param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st)); *is_str = 1; break; case QP_MTU: - switch (ctx->mtu_msgmax >> 5) { + switch (MLX5_GET(qpc, qpc, mtu)) { case IB_MTU_256: param = 256; break; @@ -258,46 +253,31 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, } break; case QP_N_RECV: - param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); + param = 1 << MLX5_GET(qpc, qpc, log_rq_size); break; case QP_RECV_SZ: - param = 1 << ((ctx->rq_size_stride & 7) + 4); + param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4); break; case QP_N_SEND: - no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15; - if (!no_sq) - param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11); - else - param = 0; + if (!MLX5_GET(qpc, qpc, no_sq)) + param = 1 << MLX5_GET(qpc, qpc, log_sq_size); break; case QP_LOG_PG_SZ: - param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f; - param += 12; + param = MLX5_GET(qpc, qpc, log_page_size) + 12; break; case QP_RQPN: - param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff; + param = MLX5_GET(qpc, qpc, remote_qpn); break; } -out: - kfree(out); return param; } -static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - u32 *out, int outlen) -{ - u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {}; - - MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ); - MLX5_SET(query_eq_in, in, eq_number, eq->eqn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); -} - static u64 eq_read_field(struct 
mlx5_core_dev *dev, struct mlx5_eq *eq, int index) { int outlen = MLX5_ST_SZ_BYTES(query_eq_out); + u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {}; u64 param = 0; void *ctx; u32 *out; @@ -307,7 +287,9 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, if (!out) return param; - err = mlx5_core_eq_query(dev, eq, out, outlen); + MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ); + MLX5_SET(query_eq_in, in, eq_number, eq->eqn); + err = mlx5_cmd_exec_inout(dev, query_eq, in, out); if (err) { mlx5_core_warn(dev, "failed to query eq\n"); goto out; @@ -344,7 +326,7 @@ static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, if (!out) return param; - err = mlx5_core_query_cq(dev, cq, out, outlen); + err = mlx5_core_query_cq(dev, cq, out); if (err) { mlx5_core_warn(dev, "failed to query cq\n"); goto out; @@ -461,6 +443,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) return err; } +EXPORT_SYMBOL(mlx5_debug_qp_add); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) { @@ -470,6 +453,7 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) if (qp->dbg) rem_res_tree(qp->dbg); } +EXPORT_SYMBOL(mlx5_debug_qp_remove); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c index d2228e37450f..a894ea98c95a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c @@ -8,33 +8,13 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1; } -static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev) -{ - u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {}; - u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; - - MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); - MLX5_SET(enable_hca_in, in, function_id, 0); - MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); - return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); -} - -static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev) -{ - u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {}; - u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; - - MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); - MLX5_SET(disable_hca_in, in, function_id, 0); - MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - static int mlx5_peer_pf_init(struct mlx5_core_dev *dev) { + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; int err; - err = mlx5_peer_pf_enable_hca(dev); + MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); + err = mlx5_cmd_exec_in(dev, enable_hca, in); if (err) mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n", err); @@ -44,9 +24,11 @@ static int mlx5_peer_pf_init(struct mlx5_core_dev *dev) static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev) { + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; int err; - err = mlx5_peer_pf_disable_hca(dev); + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); + err = mlx5_cmd_exec_in(dev, disable_hca, in); if (err) { mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 23701c0e36ec..e8508c74eaa8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1013,7 +1013,7 @@ int 
mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, const struct mlx5e_tirc_config *ttconfig, void *tirc, bool inner); -void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen); +void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in); struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt); struct mlx5e_xsk_param; @@ -1103,8 +1103,8 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv); void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv); #endif -int mlx5e_create_tir(struct mlx5_core_dev *mdev, - struct mlx5e_tir *tir, u32 *in, int inlen); +int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, + u32 *in); void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 3a199a03d929..7283443868f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -43,7 +43,7 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) void *cqc; int err; - err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out)); + err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c index 7cd5b02e0f10..8fe8b4d6ad1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c @@ -38,12 +38,11 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv) void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv) { - u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {}; - u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {}; MLX5_SET(arm_monitor_counter_in, in, opcode, MLX5_CMD_OP_ARM_MONITOR_COUNTER); - mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in); } static void mlx5e_monitor_counters_work(struct work_struct *work) @@ -66,19 +65,6 @@ static int mlx5e_monitor_event_handler(struct notifier_block *nb, return NOTIFY_OK; } -static void mlx5e_monitor_counter_start(struct mlx5e_priv *priv) -{ - MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler, - MONITOR_COUNTER); - mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb); -} - -static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv) -{ - mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb); - cancel_work_sync(&priv->monitor_counters_work); -} - static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in) { enum mlx5_monitor_counter_ppcnt ppcnt_cnt; @@ -118,8 +104,7 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv) int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters); int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 
0 : MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters); - u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {}; - u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {}; int q_counter = priv->q_counter; int cnt = 0; @@ -136,34 +121,31 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv) MLX5_SET(set_monitor_counter_in, in, opcode, MLX5_CMD_OP_SET_MONITOR_COUNTER); - mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(mdev, set_monitor_counter, in); } /* check if mlx5e_monitor_counter_supported before calling this function*/ void mlx5e_monitor_counter_init(struct mlx5e_priv *priv) { INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work); - mlx5e_monitor_counter_start(priv); + MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler, + MONITOR_COUNTER); + mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb); + mlx5e_set_monitor_counter(priv); mlx5e_monitor_counter_arm(priv); queue_work(priv->wq, &priv->update_stats_work); } -static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv) -{ - u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {}; - u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {}; - - MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0); - MLX5_SET(set_monitor_counter_in, in, opcode, - MLX5_CMD_OP_SET_MONITOR_COUNTER); - - mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); -} - /* check if mlx5e_monitor_counter_supported before calling this function*/ void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv) { - mlx5e_monitor_counter_disable(priv); - mlx5e_monitor_counter_stop(priv); + u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {}; + + MLX5_SET(set_monitor_counter_in, in, opcode, + MLX5_CMD_OP_SET_MONITOR_COUNTER); + + mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in); + mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb); + cancel_work_sync(&priv->monitor_counters_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index a172c5e39710..1079558d292a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -384,7 +384,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv, char *modact; int err, i; - action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); + action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto); flow_action_for_each(i, act, flow_action) { switch (act->id) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index f7890e0ce96c..af3228b3f303 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -36,12 +36,11 @@ * Global resources are common to all the netdevices crated on the same nic. 
*/ -int mlx5e_create_tir(struct mlx5_core_dev *mdev, - struct mlx5e_tir *tir, u32 *in, int inlen) +int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in) { int err; - err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn); + err = mlx5_core_create_tir(mdev, in, &tir->tirn); if (err) return err; @@ -167,7 +166,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) mutex_lock(&mdev->mlx5e_res.td.list_lock); list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { tirn = tir->tirn; - err = mlx5_core_modify_tir(mdev, tirn, in, inlen); + err = mlx5_core_modify_tir(mdev, tirn, in); if (err) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 4ab78b5c2393..0279bb7246e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1204,7 +1204,7 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, } if (hash_changed) - mlx5e_modify_tirs_hash(priv, in, inlen); + mlx5e_modify_tirs_hash(priv, in); mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 3bc2ac3d53fc..83c9b2bbc4af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -858,7 +858,7 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv, goto out; priv->rss_params.rx_hash_fields[tt] = rx_hash_field; - mlx5e_modify_tirs_hash(priv, in, inlen); + mlx5e_modify_tirs_hash(priv, in); out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47396f1b02f4..10c933e5da9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -721,7 +721,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) MLX5_SET(modify_rq_in, in, rq_state, curr_state); MLX5_SET(rqc, rqc, state, next_state); - err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + err = mlx5_core_modify_rq(mdev, rq->rqn, in); kvfree(in); @@ -752,7 +752,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) MLX5_SET(rqc, rqc, scatter_fcs, enable); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); - err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + err = mlx5_core_modify_rq(mdev, rq->rqn, in); kvfree(in); @@ -781,7 +781,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) MLX5_SET(rqc, rqc, vsd, vsd); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); - err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + err = mlx5_core_modify_rq(mdev, rq->rqn, in); kvfree(in); @@ -1259,7 +1259,7 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index); } - err = mlx5_core_modify_sq(mdev, sqn, in, inlen); + err = mlx5_core_modify_sq(mdev, sqn, in); kvfree(in); @@ -2698,7 +2698,7 @@ static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig, ttconfig->rx_hash_fields = rx_hash_fields; } -void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) +void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in) { void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); struct mlx5e_rss_params *rss = &priv->rss_params; @@ -2714,7 +2714,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, 
void *in, int inlen) mlx5e_update_rx_hash_fields(&ttconfig, tt, rss->rx_hash_fields[tt]); mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false); - mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); + mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); } if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) @@ -2725,8 +2725,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) mlx5e_update_rx_hash_fields(&ttconfig, tt, rss->rx_hash_fields[tt]); mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true); - mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, - inlen); + mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in); } } @@ -2752,15 +2751,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, - inlen); + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); if (err) goto free_in; } for (ix = 0; ix < priv->max_nch; ix++) { - err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, - in, inlen); + err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in); if (err) goto free_in; } @@ -3247,7 +3244,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) if (mlx5_lag_is_lacp_owner(mdev)) MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); - return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn); + return mlx5_core_create_tis(mdev, in, tisn); } void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) @@ -3365,7 +3362,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) tir = &priv->indir_tir[tt]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_indir_tir_ctx(priv, tt, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in, inlen); + err = mlx5e_create_tir(priv->mdev, tir, in); if (err) { mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err); goto err_destroy_inner_tirs; @@ -3380,7 +3377,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) tir = &priv->inner_indir_tir[i]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_inner_indir_tir_ctx(priv, i, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in, inlen); + err = mlx5e_create_tir(priv->mdev, tir, in); if (err) { mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err); goto err_destroy_inner_tirs; @@ -3423,7 +3420,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) tir = &tirs[ix]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in, inlen); + err = mlx5e_create_tir(priv->mdev, tir, in); if (unlikely(err)) goto err_destroy_ch_tirs; } @@ -5035,29 +5032,40 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) void mlx5e_create_q_counters(struct mlx5e_priv *priv) { + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; int err; - err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter); - if (err) { - mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err); - priv->q_counter = 0; - } + MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); + err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); + if (!err) + priv->q_counter = + MLX5_GET(alloc_q_counter_out, out, counter_set_id); - err = 
mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter); - if (err) { - mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err); - priv->drop_rq_q_counter = 0; - } + err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); + if (!err) + priv->drop_rq_q_counter = + MLX5_GET(alloc_q_counter_out, out, counter_set_id); } void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { - if (priv->q_counter) - mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; - if (priv->drop_rq_q_counter) - mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter); + MLX5_SET(dealloc_q_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_Q_COUNTER); + if (priv->q_counter) { + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, + priv->q_counter); + mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in); + } + + if (priv->drop_rq_q_counter) { + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, + priv->drop_rq_q_counter); + mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in); + } } static int mlx5e_nic_init(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6eb0e8236bbf..f009fe09e99b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -411,18 +411,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt) static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt) { struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; - u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; + u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; + int ret; - if (priv->q_counter && - !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, - sizeof(out))) - qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, - out, out_of_buffer); - if (priv->drop_rq_q_counter && - !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0, - out, sizeof(out))) - qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out, - out_of_buffer); + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); + + if (priv->q_counter) { + MLX5_SET(query_q_counter_in, in, counter_set_id, + priv->q_counter); + ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out); + if (!ret) + qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, + out, out_of_buffer); + } + + if (priv->drop_rq_q_counter) { + MLX5_SET(query_q_counter_in, in, counter_set_id, + priv->drop_rq_q_counter); + ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out); + if (!ret) + qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, + out, out_of_buffer); + } } #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) @@ -480,18 +491,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env) static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env) { u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out; - int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out); - u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) return; - MLX5_SET(query_vnic_env_in, in, opcode, - MLX5_CMD_OP_QUERY_VNIC_ENV); - MLX5_SET(query_vnic_env_in, in, op_mod, 0); - MLX5_SET(query_vnic_env_in, in, other_vport, 0); - mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); + MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); + mlx5_cmd_exec_inout(mdev, 
query_vnic_env, in, out); } #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) @@ -566,15 +573,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport) static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport) { - int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u32 *out = (u32 *)priv->stats.vport.query_vport_out; - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); - MLX5_SET(query_vport_counter_in, in, op_mod, 0); - MLX5_SET(query_vport_counter_in, in, other_vport, 0); - mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); + mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out); } #define PPORT_802_3_OFF(c) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7d2b05576f44..77397aa66810 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -61,7 +61,7 @@ #include "lib/geneve.h" #include "diag/en_tc_tracepoint.h" -#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) +#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) struct mlx5_nic_flow_attr { u32 action; @@ -573,7 +573,7 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) { - u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {}; void *tirc; int err; @@ -587,7 +587,7 @@ static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]); MLX5_SET(tirc, tirc, transport_domain, hp->tdn); - err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn); + err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn); if (err) goto create_tir_err; @@ -671,7 +671,7 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp) mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false); err = mlx5_core_create_tir(hp->func_mdev, in, - MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]); + &hp->indir_tirn[tt]); if (err) { mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err); goto err_destroy_tirs; @@ -2671,7 +2671,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, set_vals = &hdrs[0].vals; add_vals = &hdrs[1].vals; - action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); + action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto); for (i = 0; i < ARRAY_SIZE(fields); i++) { bool skip; @@ -2804,7 +2804,7 @@ int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions) return 0; - action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); + action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto); max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev, namespace); @@ -4905,7 +4905,7 @@ bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) reg_c0 = 0; - reg_c1 = be32_to_cpu(cqe->imm_inval_pkey); + reg_c1 = be32_to_cpu(cqe->ft_metadata); if (!reg_c0) return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index cccea3a8eddd..4d974b5405b5 100644 --- 
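[Editorial aside — not part of the patch.] The conversion repeated throughout this series replaces explicit-length mlx5_cmd_exec() calls with mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(), which derive both buffer sizes from the IFC command name, so a caller can no longer pass a mismatched length. A minimal sketch of the helpers this relies on, paraphrased from include/linux/mlx5/driver.h of the same kernel generation (the exact upstream definition may differ slightly):

/* Run a command whose output the caller wants to inspect. */
#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                      \
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),          \
		      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))

/* Run a command and discard everything but the command status. */
#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                              \
	({                                                               \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};             \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);             \
	})

/*
 * Before: mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
 * After:  mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
 *         mlx5_cmd_exec_in(mdev, set_monitor_counter, in);
 */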
a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -36,7 +36,6 @@ #include #include #include -#include #ifdef CONFIG_RFS_ACCEL #include #endif @@ -102,12 +101,11 @@ struct mlx5_eq_table { static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) { - u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {}; MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ); MLX5_SET(destroy_eq_in, in, eq_number, eqn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, destroy_eq, in); } /* caller must eventually call mlx5_cq_put on the returned cq */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c index 029001040737..d5bf908dfecd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c @@ -274,7 +274,7 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw, static int create_fdb_chain_restore(struct fdb_chain *fdb_chain) { - char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)]; + char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)]; struct mlx5_eswitch *esw = fdb_chain->esw; struct mlx5_modify_hdr *mod_hdr; u32 index; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 7f618a443bfd..c5eb4e7754a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -84,8 +84,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { - int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0}; - int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {}; void *nic_vport_ctx; MLX5_SET(modify_nic_vport_context_in, in, @@ -108,40 +107,24 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_promisc_change, 1); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in); } /* E-Switch vport context HW commands */ int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, - bool other_vport, - void *in, int inlen) + bool other_vport, void *in) { - u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0}; - MLX5_SET(modify_esw_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); - return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); -} - -int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, - bool other_vport, - void *out, int outlen) -{ - u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {}; - - MLX5_SET(query_esw_vport_context_in, in, opcode, - MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); - MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); - MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in); } static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, u16 vlan, u8 qos, u8 set_flags) { - u32 
in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) @@ -170,8 +153,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(modify_esw_vport_context_in, in, field_select.vport_cvlan_insert, 1); - return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, - in, sizeof(in)); + return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in); } /* E-Switch FDB */ @@ -1901,7 +1883,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) MLX5_SET(query_esw_functions_in, in, opcode, MLX5_CMD_OP_QUERY_ESW_FUNCTIONS); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); + err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out); if (!err) return out; @@ -2783,8 +2765,8 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, { struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; - struct mlx5_vport_drop_stats stats = {0}; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {}; + struct mlx5_vport_drop_stats stats = {}; int err = 0; u32 *out; @@ -2801,7 +2783,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport); MLX5_SET(query_vport_counter_in, in, other_vport, 1); - err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen); + err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out); if (err) goto free_out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index c1848b57f61c..4a1c6c78bb14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -329,11 +329,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, - bool other_vport, - void *in, int inlen); -int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, - bool other_vport, - void *out, int outlen); + bool other_vport, void *in); struct mlx5_flow_spec; struct mlx5_esw_flow_attr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b2e38e0cde97..781c1d184b60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -784,7 +784,8 @@ static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw) static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) { u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; - u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; + u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; + u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {}; u8 curr, wanted; int err; @@ -792,8 +793,9 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) !mlx5_eswitch_vport_match_metadata_enabled(esw)) return 0; - err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false, - out, sizeof(out)); + MLX5_SET(query_esw_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); + err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out); if 
(err) return err; @@ -808,14 +810,12 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) else curr &= ~wanted; - MLX5_SET(modify_esw_vport_context_in, in, + MLX5_SET(modify_esw_vport_context_in, min, esw_vport_context.fdb_to_vport_reg_c_id, curr); - - MLX5_SET(modify_esw_vport_context_in, in, + MLX5_SET(modify_esw_vport_context_in, min, field_select.fdb_to_vport_reg_c_id, 1); - err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in, - sizeof(in)); + err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min); if (!err) { if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1)) esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED; @@ -1468,7 +1468,7 @@ query_vports: out: *mode = mlx5_mode; return 0; -} +} static void esw_destroy_restore_table(struct mlx5_eswitch *esw) { @@ -1484,7 +1484,7 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw) static int esw_create_restore_table(struct mlx5_eswitch *esw) { - u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {}; + u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; @@ -1894,7 +1894,7 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {}; + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_flow_act flow_act = {}; int err = 0; u32 key; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c index c0fd2212e890..9a37077152aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c @@ -31,7 +31,6 @@ */ #include -#include #include #include @@ -143,15 +142,15 @@ int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query) int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc, u32 *fpga_qpn) { - u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)]; + u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)] = {}; + u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {}; int ret; MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP); memcpy(MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc), fpga_qpc, MLX5_FLD_SZ_BYTES(fpga_create_qp_in, fpga_qpc)); - ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + ret = mlx5_cmd_exec_inout(dev, fpga_create_qp, in, out); if (ret) return ret; @@ -165,8 +164,7 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn, enum mlx5_fpga_qpc_field_select fields, void *fpga_qpc) { - u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)]; + u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {}; MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP); MLX5_SET(fpga_modify_qp_in, in, field_select, fields); @@ -174,20 +172,20 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn, memcpy(MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc), fpga_qpc, MLX5_FLD_SZ_BYTES(fpga_modify_qp_in, fpga_qpc)); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, fpga_modify_qp, in); } int mlx5_fpga_query_qp(struct mlx5_core_dev *dev, u32 fpga_qpn, void *fpga_qpc) { - u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {0}; - u32 
out[MLX5_ST_SZ_DW(fpga_query_qp_out)]; + u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)] = {}; + u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {}; int ret; MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP); MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn); - ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + ret = mlx5_cmd_exec_inout(dev, fpga_query_qp, in, out); if (ret) return ret; @@ -198,20 +196,19 @@ int mlx5_fpga_query_qp(struct mlx5_core_dev *dev, int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn) { - u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)]; + u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {}; MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP); MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, fpga_destroy_qp, in); } int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn, bool clear, struct mlx5_fpga_qp_counters *data) { - u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)]; + u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)] = {}; + u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {}; int ret; MLX5_SET(fpga_query_qp_counters_in, in, opcode, @@ -219,7 +216,7 @@ int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn, MLX5_SET(fpga_query_qp_counters_in, in, clear, clear); MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn); - ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + ret = mlx5_cmd_exec_inout(dev, fpga_query_qp_counters, in, out); if (ret) return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 61021133029e..182d3ac3e73f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -165,7 +165,7 @@ static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn, ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) | MLX5_OPCODE_SEND); - ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8)); + ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.qpn << 8)); conn->qp.sq.pc++; conn->qp.sq.bufs[ix] = buf; @@ -362,23 +362,6 @@ static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn) conn->fdev->conn_res.uar->map, conn->cq.wq.cc); } -static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq, - enum mlx5_event event) -{ - struct mlx5_fpga_conn *conn; - - conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); - mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn); -} - -static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event) -{ - struct mlx5_fpga_conn *conn; - - conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp); - mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn); -} - static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn, unsigned int budget) { @@ -493,7 +476,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) *conn->cq.mcq.arm_db = 0; conn->cq.mcq.vector = 0; conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete; - conn->cq.mcq.event = mlx5_fpga_conn_cq_event; conn->cq.mcq.irqn = irqn; conn->cq.mcq.uar = fdev->conn_res.uar; tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet, @@ -534,8 +516,9 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, 
unsigned int tx_size, unsigned int rx_size) { struct mlx5_fpga_device *fdev = conn->fdev; + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; struct mlx5_core_dev *mdev = fdev->mdev; - u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0}; + u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {}; void *in = NULL, *qpc; int err, inlen; @@ -600,12 +583,13 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); - err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen); + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); if (err) goto err_sq_bufs; - conn->qp.mqp.event = mlx5_fpga_conn_event; - mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn); + conn->qp.qpn = MLX5_GET(create_qp_out, out, qpn); + mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.qpn); goto out; @@ -658,7 +642,13 @@ static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn) static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn) { - mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp); + struct mlx5_core_dev *dev = conn->fdev->mdev; + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {}; + + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, conn->qp.qpn); + mlx5_cmd_exec_in(dev, destroy_qp, in); + mlx5_fpga_conn_free_recv_bufs(conn); mlx5_fpga_conn_flush_send_bufs(conn); kvfree(conn->qp.sq.bufs); @@ -666,30 +656,29 @@ static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn) mlx5_wq_destroy(&conn->qp.wq_ctrl); } -static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn) +static int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn) { struct mlx5_core_dev *mdev = conn->fdev->mdev; + u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {}; - mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn); + mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.qpn); - return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL, - &conn->qp.mqp); + MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP); + MLX5_SET(qp_2rst_in, in, qpn, conn->qp.qpn); + + return mlx5_cmd_exec_in(mdev, qp_2rst, in); } -static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn) +static int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn) { + u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {}; struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; - u32 *qpc = NULL; - int err; + u32 *qpc; - mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn); + mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.qpn); - qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); - if (!qpc) { - err = -ENOMEM; - goto out; - } + qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); @@ -700,32 +689,22 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn) MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); - err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, - &conn->qp.mqp); - if (err) { - mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); - goto out; - } + MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP); + MLX5_SET(rst2init_qp_in, in, qpn, conn->qp.qpn); -out: - kfree(qpc); - return err; + return mlx5_cmd_exec_in(mdev, rst2init_qp, in); } -static inline int mlx5_fpga_conn_rtr_qp(struct 
mlx5_fpga_conn *conn) +static int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn) { + u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {}; struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; - u32 *qpc = NULL; - int err; + u32 *qpc; mlx5_fpga_dbg(conn->fdev, "QP RTR\n"); - qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); - if (!qpc) { - err = -ENOMEM; - goto out; - } + qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc); MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES); MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg)); @@ -745,33 +724,22 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn) MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip), MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip)); - err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, - &conn->qp.mqp); - if (err) { - mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); - goto out; - } + MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP); + MLX5_SET(init2rtr_qp_in, in, qpn, conn->qp.qpn); -out: - kfree(qpc); - return err; + return mlx5_cmd_exec_in(mdev, init2rtr_qp, in); } -static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn) +static int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; + u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {}; struct mlx5_core_dev *mdev = fdev->mdev; - u32 *qpc = NULL; - u32 opt_mask; - int err; + u32 *qpc; mlx5_fpga_dbg(conn->fdev, "QP RTS\n"); - qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); - if (!qpc) { - err = -ENOMEM; - goto out; - } + qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc); MLX5_SET(qpc, qpc, log_ack_req_freq, 8); MLX5_SET(qpc, qpc, min_rnr_nak, 0x12); @@ -781,17 +749,11 @@ static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn) MLX5_SET(qpc, qpc, retry_count, 7); MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */ - opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT; - err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc, - &conn->qp.mqp); - if (err) { - mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); - goto out; - } + MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP); + MLX5_SET(rtr2rts_qp_in, in, qpn, conn->qp.qpn); + MLX5_SET(rtr2rts_qp_in, in, opt_param_mask, MLX5_QP_OPTPAR_RNR_TIMEOUT); -out: - kfree(qpc); - return err; + return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in); } static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn) @@ -931,7 +893,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev, MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1); MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0); MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY); - MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn); + MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.qpn); MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7); MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7); @@ -972,19 +934,11 @@ out: void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn) { - struct mlx5_fpga_device *fdev = conn->fdev; - struct mlx5_core_dev *mdev = fdev->mdev; - int err = 0; - conn->qp.active = false; tasklet_disable(&conn->cq.tasklet); synchronize_irq(conn->cq.mcq.irqn); mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn); - err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL, - &conn->qp.mqp); - if (err) - mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err); mlx5_fpga_conn_destroy_qp(conn); mlx5_fpga_conn_destroy_cq(conn); diff --git 
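[Editorial aside — not part of the patch.] The fpga/conn.c hunks above drop mlx5_core_qp_modify() and the heap-allocated QP context in favour of building each state-transition command on the stack: zero-initialise the *_qp_in layout, fill the embedded qpc with MLX5_SET, set the opcode and qpn, and execute with mlx5_cmd_exec_in(). A minimal sketch of one transition in that style (the function name is hypothetical; the field names are the ones the patch itself uses):

static int example_qp_rst2init(struct mlx5_core_dev *mdev, u32 qpn)
{
	u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
	u32 *qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

	/* Transition-specific QP context fields go into the embedded qpc. */
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

	MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
	MLX5_SET(rst2init_qp_in, in, qpn, qpn);

	return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}

The ipoib.c hunks later in this section drive RST2INIT/INIT2RTR/RTR2RTS for the underlay QP with the same shape.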
a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h index 634ae10e287b..5116e869a6e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h @@ -65,7 +65,7 @@ struct mlx5_fpga_conn { int sgid_index; struct mlx5_wq_qp wq; struct mlx5_wq_ctrl wq_ctrl; - struct mlx5_core_qp mqp; + u32 qpn; struct { spinlock_t lock; /* Protects all SQ state */ unsigned int pc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 90048697b2ff..1a8e826ac86b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -155,8 +155,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect) { - u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; struct mlx5_core_dev *dev = ns->dev; if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && @@ -167,13 +166,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); - if (disconnect) { + if (disconnect) MLX5_SET(set_flow_table_root_in, in, op_mod, 1); - MLX5_SET(set_flow_table_root_in, in, table_id, 0); - } else { - MLX5_SET(set_flow_table_root_in, in, op_mod, 0); + else MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); - } MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn); if (ft->vport) { @@ -181,7 +177,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, MLX5_SET(set_flow_table_root_in, in, other_vport, 1); } - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, set_flow_table_root, in); } static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, @@ -192,8 +188,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT); int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION); - u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {}; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {}; struct mlx5_core_dev *dev = ns->dev; int err; @@ -239,7 +235,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, break; } - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out); if (!err) ft->id = MLX5_GET(create_flow_table_out, out, table_id); @@ -249,8 +245,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft) { - u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {}; struct mlx5_core_dev *dev = ns->dev; MLX5_SET(destroy_flow_table_in, in, opcode, @@ -262,15 +257,14 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns, MLX5_SET(destroy_flow_table_in, in, other_vport, 1); } - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + 
return mlx5_cmd_exec_in(dev, destroy_flow_table, in); } static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { - u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {}; struct mlx5_core_dev *dev = ns->dev; MLX5_SET(modify_flow_table_in, in, opcode, @@ -310,7 +304,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, } } - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_flow_table, in); } static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns, @@ -318,8 +312,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns, u32 *in, struct mlx5_flow_group *fg) { - u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {}; struct mlx5_core_dev *dev = ns->dev; int err; @@ -332,7 +325,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns, MLX5_SET(create_flow_group_in, in, other_vport, 1); } - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out); if (!err) fg->id = MLX5_GET(create_flow_group_out, out, group_id); @@ -343,8 +336,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *fg) { - u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {}; struct mlx5_core_dev *dev = ns->dev; MLX5_SET(destroy_flow_group_in, in, opcode, @@ -357,7 +349,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns, MLX5_SET(destroy_flow_group_in, in, other_vport, 1); } - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, destroy_flow_group, in); } static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, @@ -600,8 +592,7 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) { - u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {}; struct mlx5_core_dev *dev = ns->dev; MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); @@ -613,22 +604,22 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, MLX5_SET(delete_fte_in, in, other_vport, 1); } - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, delete_fte, in); } int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask, u32 *id) { - u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {}; int err; MLX5_SET(alloc_flow_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_FLOW_COUNTER); MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out); if (!err) *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); return err; @@ -641,21 +632,20 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) int 
mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id) { - u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {}; MLX5_SET(dealloc_flow_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in); } int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes) { u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + - MLX5_ST_SZ_BYTES(traffic_counter)] = {0}; - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; + MLX5_ST_SZ_BYTES(traffic_counter)] = {}; + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {}; void *stats; int err = 0; @@ -683,11 +673,10 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, u32 *out) { int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len); - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {}; MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); - MLX5_SET(query_flow_counter_in, in, op_mod, 0); MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id); MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); @@ -700,7 +689,7 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, enum mlx5_flow_namespace_type namespace, struct mlx5_pkt_reformat *pkt_reformat) { - u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)]; + u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {}; struct mlx5_core_dev *dev = ns->dev; void *packet_reformat_context_in; int max_encap_size; @@ -732,7 +721,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, reformat_data); inlen = reformat - (void *)in + size; - memset(in, 0, inlen); MLX5_SET(alloc_packet_reformat_context_in, in, opcode, MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); MLX5_SET(packet_reformat_context_in, packet_reformat_context_in, @@ -741,7 +729,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, reformat_type, reformat_type); memcpy(reformat, reformat_data, size); - memset(out, 0, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out, @@ -753,17 +740,15 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, struct mlx5_pkt_reformat *pkt_reformat) { - u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)]; + u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {}; struct mlx5_core_dev *dev = ns->dev; - memset(in, 0, sizeof(in)); MLX5_SET(dealloc_packet_reformat_context_in, in, opcode, MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id, pkt_reformat->id); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in); } static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, @@ -771,7 +756,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, void *modify_actions, struct mlx5_modify_hdr *modify_hdr) { - u32 
out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)]; + u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {}; int max_actions, actions_size, inlen, err; struct mlx5_core_dev *dev = ns->dev; void *actions_in; @@ -806,7 +791,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, return -EOPNOTSUPP; } - actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions; + actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions; inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size; in = kzalloc(inlen, GFP_KERNEL); @@ -821,7 +806,6 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions); memcpy(actions_in, modify_actions, actions_size); - memset(out, 0, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id); @@ -832,17 +816,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, struct mlx5_modify_hdr *modify_hdr) { - u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)]; + u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {}; struct mlx5_core_dev *dev = ns->dev; - memset(in, 0, sizeof(in)); MLX5_SET(dealloc_modify_header_context_in, in, opcode, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT); MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id, modify_hdr->id); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in); } static const struct mlx5_flow_cmds mlx5_flow_cmds = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 90e3d0233101..a5fbe7343508 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -31,7 +31,6 @@ */ #include -#include #include #include #include "mlx5_core.h" @@ -68,26 +67,19 @@ enum { MCQI_FW_STORED_VERSION = 1, }; -static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, - int outlen) -{ - u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0}; - - MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); -} - int mlx5_query_board_id(struct mlx5_core_dev *dev) { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); + u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {}; int err; out = kzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - err = mlx5_cmd_query_adapter(dev, out, outlen); + MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); + err = mlx5_cmd_exec_inout(dev, query_adapter, in, out); if (err) goto out; @@ -106,13 +98,15 @@ int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id) { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); + u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {}; int err; out = kzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - err = mlx5_cmd_query_adapter(mdev, out, outlen); + MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); + err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out); if (err) goto out; @@ -260,8 +254,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id) { - u32 out[MLX5_ST_SZ_DW(init_hca_out)] = 
{0}; - u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {}; int i; MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA); @@ -272,16 +265,15 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id) sw_owner_id[i]); } - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, init_hca, in); } int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) { - u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {}; MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, teardown_hca, in); } int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) @@ -316,8 +308,8 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) { unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS; - u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {}; int state; int ret; @@ -330,7 +322,7 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN); - ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out); if (ret) return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index f99e1752d4e5..c0cfbab15fe9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -36,7 +36,6 @@ #include #include #include -#include #include "mlx5_core.h" #include "lib/eq.h" #include "lib/mlx5.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 673aaa815f57..068578be00f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -160,45 +160,54 @@ int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5i_priv *ipriv = priv->ppriv; - struct mlx5_core_qp *qp = &ipriv->qp; - struct mlx5_qp_context *context; int ret; - /* QP states */ - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; + { + u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {}; + u32 *qpc; - context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); - context->pri_path.port = 1; - context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index); - context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY); + qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc); - ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp); - if (ret) { - mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret); - goto err_qp_modify_to_err; + MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); + MLX5_SET(qpc, qpc, primary_address_path.pkey_index, + ipriv->pkey_index); + MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1); + MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY); + + MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP); + MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn); + ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in); + if (ret) + goto err_qp_modify_to_err; } - memset(context, 0, sizeof(*context)); + { + u32 
in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {}; - ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp); - if (ret) { - mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret); - goto err_qp_modify_to_err; + MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP); + MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn); + ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in); + if (ret) + goto err_qp_modify_to_err; } + { + u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {}; - ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp); - if (ret) { - mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret); - goto err_qp_modify_to_err; + MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP); + MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn); + ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in); + if (ret) + goto err_qp_modify_to_err; } - - kfree(context); return 0; err_qp_modify_to_err: - mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp); - kfree(context); + { + u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {}; + + MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP); + MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn); + mlx5_cmd_exec_in(mdev, qp_2err, in); + } return ret; } @@ -206,30 +215,24 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_qp_context context; - int err; + u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {}; - err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, - &ipriv->qp); - if (err) - mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err); + MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP); + MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn); + mlx5_cmd_exec_in(mdev, qp_2rst, in); } #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2 -int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) +int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) { - u32 *in = NULL; + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; + u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {}; + struct mlx5i_priv *ipriv = priv->ppriv; void *addr_path; int ret = 0; - int inlen; void *qpc; - inlen = MLX5_ST_SZ_BYTES(create_qp_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); @@ -240,20 +243,23 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp MLX5_SET(ads, addr_path, vhca_port_num, 1); MLX5_SET(ads, addr_path, grh, 1); - ret = mlx5_core_create_qp(mdev, qp, in, inlen); - if (ret) { - mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret); - goto out; - } + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); + ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out); + if (ret) + return ret; -out: - kvfree(in); - return ret; + ipriv->qpn = MLX5_GET(create_qp_out, out, qpn); + + return 0; } -void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) +void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn) { - mlx5_core_destroy_qp(mdev, qp); + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {}; + + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, qpn); + mlx5_cmd_exec_in(mdev, destroy_qp, in); } int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn) @@ -273,13 +279,13 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) struct mlx5i_priv *ipriv = priv->ppriv; int 
err; - err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp); + err = mlx5i_create_underlay_qp(priv); if (err) { mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err); return err; } - err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]); + err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); goto err_destroy_underlay_qp; @@ -288,7 +294,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return 0; err_destroy_underlay_qp: - mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); + mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn); return err; } @@ -297,7 +303,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) struct mlx5i_priv *ipriv = priv->ppriv; mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]); - mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); + mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn); } static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) @@ -500,12 +506,12 @@ int mlx5i_dev_init(struct net_device *dev) struct mlx5i_priv *ipriv = priv->ppriv; /* Set dev address using underlay QP */ - dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff; - dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff; - dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff; + dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff; + dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff; + dev->dev_addr[3] = (ipriv->qpn) & 0xff; /* Add QPN to net-device mapping to HT */ - mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn); + mlx5i_pkey_add_qpn(dev, ipriv->qpn); return 0; } @@ -532,7 +538,7 @@ void mlx5i_dev_cleanup(struct net_device *dev) mlx5i_uninit_underlay_qp(priv); /* Delete QPN to net-device mapping from HT */ - mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn); + mlx5i_pkey_del_qpn(dev, ipriv->qpn); } static int mlx5i_open(struct net_device *netdev) @@ -552,7 +558,7 @@ static int mlx5i_open(struct net_device *netdev) goto err_clear_state_opened_flag; } - err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn); + err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn); if (err) { mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err); goto err_reset_qp; @@ -569,7 +575,7 @@ static int mlx5i_open(struct net_device *netdev) return 0; err_remove_fs_underlay_qp: - mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn); err_reset_qp: mlx5i_uninit_underlay_qp(epriv); err_clear_state_opened_flag: @@ -595,7 +601,7 @@ static int mlx5i_close(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &epriv->state); netif_carrier_off(epriv->netdev); - mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn); mlx5e_deactivate_priv_channels(epriv); mlx5e_close_channels(&epriv->channels); mlx5i_uninit_underlay_qp(epriv); @@ -614,11 +620,12 @@ static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca, struct mlx5i_priv *ipriv = epriv->ppriv; int err; - mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); - err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn); + mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn, + gid->raw); + err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn); if (err) mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n", - ipriv->qp.qpn, gid->raw); + ipriv->qpn, gid->raw); if (set_qkey) { mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n", @@ -637,12 +644,13 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca, struct mlx5i_priv *ipriv = 
epriv->ppriv; int err; - mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); + mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn, + gid->raw); - err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn); + err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn); if (err) mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n", - ipriv->qp.qpn, gid->raw); + ipriv->qpn, gid->raw); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index de7e01a027bb..3483ba642cfe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -51,7 +51,7 @@ extern const struct ethtool_ops mlx5i_pkey_ethtool_ops; /* ipoib rdma netdev's private data structure */ struct mlx5i_priv { struct rdma_netdev rn; /* keep this first */ - struct mlx5_core_qp qp; + u32 qpn; bool sub_interface; u32 qkey; u16 pkey_index; @@ -62,8 +62,8 @@ struct mlx5i_priv { int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn); /* Underlay QP create/destroy functions */ -int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp); -void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp); +int mlx5i_create_underlay_qp(struct mlx5e_priv *priv); +void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn); /* Underlay QP state modification init/uninit functions */ int mlx5i_init_underlay_qp(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 96e64187c089..b9af37ad40bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -204,13 +204,13 @@ static int mlx5i_pkey_open(struct net_device *netdev) goto err_release_lock; } - err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn); + err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn); if (err) { mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err); goto err_unint_underlay_qp; } - err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]); + err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]); if (err) { mlx5_core_warn(mdev, "create child tis failed, %d\n", err); goto err_remove_rx_uderlay_qp; @@ -230,7 +230,7 @@ static int mlx5i_pkey_open(struct net_device *netdev) err_clear_state_opened_flag: mlx5e_destroy_tis(mdev, epriv->tisn[0][0]); err_remove_rx_uderlay_qp: - mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn); err_unint_underlay_qp: mlx5i_uninit_underlay_qp(epriv); err_release_lock: @@ -253,7 +253,7 @@ static int mlx5i_pkey_close(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); netif_carrier_off(priv->netdev); - mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn); mlx5i_uninit_underlay_qp(priv); mlx5e_deactivate_priv_channels(priv); mlx5e_close_channels(&priv->channels); @@ -307,23 +307,20 @@ static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv) static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv) { - struct mlx5i_priv *ipriv = priv->ppriv; int err; - err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp); - if (err) { + err = mlx5i_create_underlay_qp(priv); + if (err) mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err); - return err; - } - return 
0; + return err; } static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; - mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); + mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn); } static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 93052b07c76c..c6ad5ca46877 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -47,8 +47,7 @@ static DEFINE_MUTEX(lag_mutex); static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, u8 remap_port2) { - u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); @@ -56,14 +55,13 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, create_lag, in); } static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, u8 remap_port2) { - u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); @@ -72,52 +70,29 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev) -{ - u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0}; - - MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_lag, in); } int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev) { - u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {}; MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, create_vport_lag, in); } EXPORT_SYMBOL(mlx5_cmd_create_vport_lag); int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) { - u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {}; MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, destroy_vport_lag, in); } EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); -static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, - bool reset, void *out, int out_size) -{ - u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { }; - - MLX5_SET(query_cong_statistics_in, in, opcode, - MLX5_CMD_OP_QUERY_CONG_STATISTICS); - MLX5_SET(query_cong_statistics_in, in, clear, reset); - return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); -} - int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag 
*ldev, struct net_device *ndev) { @@ -232,12 +207,14 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, static int mlx5_deactivate_lag(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; bool roce_lag = __mlx5_lag_is_roce(ldev); int err; ldev->flags &= ~MLX5_LAG_MODE_FLAGS; - err = mlx5_cmd_destroy_lag(dev0); + MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); + err = mlx5_cmd_exec_in(dev0, destroy_lag, in); if (err) { if (roce_lag) { mlx5_core_err(dev0, @@ -758,7 +735,12 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, } for (i = 0; i < num_ports; ++i) { - ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen); + u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {}; + + MLX5_SET(query_cong_statistics_in, in, opcode, + MLX5_CMD_OP_QUERY_CONG_STATISTICS); + ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in, + out); if (ret) goto unlock; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c index 6cbccba56f70..3d5e57ff558c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c @@ -90,7 +90,8 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev) } int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, - u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id) + u64 length, u32 log_alignment, u16 uid, + phys_addr_t *addr, u32 *obj_id) { u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev)); u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; @@ -99,6 +100,7 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, unsigned long *block_map; u64 icm_start_addr; u32 log_icm_size; + u64 align_mask; u32 max_blocks; u64 block_idx; void *sw_icm; @@ -136,11 +138,14 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, return -EOPNOTSUPP; max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); + + if (log_alignment < MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) + log_alignment = MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); + align_mask = BIT(log_alignment - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - 1; + spin_lock(&dm->lock); - block_idx = bitmap_find_next_zero_area(block_map, - max_blocks, - 0, - num_blocks, 0); + block_idx = bitmap_find_next_zero_area(block_map, max_blocks, 0, + num_blocks, align_mask); if (block_idx < max_blocks) bitmap_set(block_map, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c index 7722a3f9bb68..a68738c8f4bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c @@ -124,8 +124,7 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, const u8 *mac, bool vlan, u16 vlan_id, u8 port_num) { #define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v) - u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {}; void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address); char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr, source_l3_address); @@ -153,6 +152,6 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, MLX5_SET(set_roce_address_in, in, roce_address_index, index); MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS); - return mlx5_cmd_exec(dev, in, sizeof(in), out, 
sizeof(out)); + return mlx5_cmd_exec_in(dev, set_roce_address, in); } EXPORT_SYMBOL(mlx5_core_roce_gid_set); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 3118e8d66407..fd8449ff9e17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -40,8 +40,7 @@ /* HW L2 Table (MPFS) management */ static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac) { - u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {}; u8 *in_mac_addr; MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); @@ -50,17 +49,16 @@ static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac) in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); ether_addr_copy(&in_mac_addr[2], mac); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, set_l2_table_entry, in); } static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index) { - u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {}; MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); MLX5_SET(delete_l2_table_entry_in, in, table_index, index); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, delete_l2_table_entry, in); } /* UC L2 table hash node */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c index 48b5c847b642..8809a65ecefb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c @@ -4,7 +4,6 @@ #include #include #include -#include #include "mlx5_core.h" #include "lib/port_tun.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index 148b55c3db7a..82c766a95165 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -60,24 +60,22 @@ static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev) static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) { - u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {}; MLX5_SET(add_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, add_vxlan_udp_dport, in); } static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) { - u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {}; MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in); } static struct mlx5_vxlan_port* diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c 
b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 5a97e98e937c..fbbf51026b52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -206,8 +206,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) { int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in, driver_version); - u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0}; - u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0}; + u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {}; int remaining_size = driver_ver_sz; char *string; @@ -234,7 +233,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) MLX5_SET(set_driver_version_in, in, opcode, MLX5_CMD_OP_SET_DRIVER_VERSION); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, set_driver_version, in); } static int set_dma_caps(struct pci_dev *pdev) @@ -366,7 +365,7 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); if (err) { mlx5_core_warn(dev, "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", @@ -407,30 +406,25 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX); } -static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod) +static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod) { - u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0}; - MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1); - return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); + return mlx5_cmd_exec_in(dev, set_hca_cap, in); } -static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) +static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx) { - void *set_ctx; void *set_hca_cap; - int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); int req_endianness; int err; - if (MLX5_CAP_GEN(dev, atomic)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); - if (err) - return err; - } else { + if (!MLX5_CAP_GEN(dev, atomic)) return 0; - } + + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); + if (err) + return err; req_endianness = MLX5_CAP_ATOMIC(dev, @@ -439,27 +433,18 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS) return 0; - set_ctx = kzalloc(set_sz, GFP_KERNEL); - if (!set_ctx) - return -ENOMEM; - set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); /* Set requestor to host endianness */ MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode, MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS); - err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC); - - kfree(set_ctx); - return err; + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC); } -static int handle_hca_cap_odp(struct mlx5_core_dev *dev) +static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) { void *set_hca_cap; - void *set_ctx; - int set_sz; bool do_set = false; int err; @@ -471,11 +456,6 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev) if (err) return err; - set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); - set_ctx = kzalloc(set_sz, GFP_KERNEL); - if (!set_ctx) - return -ENOMEM; - set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); memcpy(set_hca_cap, 
dev->caps.hca_cur[MLX5_CAP_ODP], MLX5_ST_SZ_BYTES(odp_cap)); @@ -504,30 +484,21 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev) ODP_CAP_SET_MAX(dev, dc_odp_caps.read); ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic); - if (do_set) - err = set_caps(dev, set_ctx, set_sz, - MLX5_SET_HCA_CAP_OP_MOD_ODP); + if (!do_set) + return 0; - kfree(set_ctx); - - return err; + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP); } -static int handle_hca_cap(struct mlx5_core_dev *dev) +static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) { - void *set_ctx = NULL; struct mlx5_profile *prof = dev->profile; - int err = -ENOMEM; - int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); void *set_hca_cap; - - set_ctx = kzalloc(set_sz, GFP_KERNEL); - if (!set_ctx) - goto query_ex; + int err; err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); if (err) - goto query_ex; + return err; set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); @@ -578,37 +549,73 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) num_vhca_ports, MLX5_CAP_GEN_MAX(dev, num_vhca_ports)); - err = set_caps(dev, set_ctx, set_sz, - MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); +} -query_ex: - kfree(set_ctx); +static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx) +{ + void *set_hca_cap; + int err; + + if (!MLX5_CAP_GEN(dev, roce)) + return 0; + + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE); + if (err) + return err; + + if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) || + !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port)) + return 0; + + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); + memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE], + MLX5_ST_SZ_BYTES(roce_cap)); + MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1); + + err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE); return err; } static int set_hca_cap(struct mlx5_core_dev *dev) { + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + void *set_ctx; int err; - err = handle_hca_cap(dev); + set_ctx = kzalloc(set_sz, GFP_KERNEL); + if (!set_ctx) + return -ENOMEM; + + err = handle_hca_cap(dev, set_ctx); if (err) { mlx5_core_err(dev, "handle_hca_cap failed\n"); goto out; } - err = handle_hca_cap_atomic(dev); + memset(set_ctx, 0, set_sz); + err = handle_hca_cap_atomic(dev, set_ctx); if (err) { mlx5_core_err(dev, "handle_hca_cap_atomic failed\n"); goto out; } - err = handle_hca_cap_odp(dev); + memset(set_ctx, 0, set_sz); + err = handle_hca_cap_odp(dev, set_ctx); if (err) { mlx5_core_err(dev, "handle_hca_cap_odp failed\n"); goto out; } + memset(set_ctx, 0, set_sz); + err = handle_hca_cap_roce(dev, set_ctx); + if (err) { + mlx5_core_err(dev, "handle_hca_cap_roce failed\n"); + goto out; + } + out: + kfree(set_ctx); return err; } @@ -642,26 +649,24 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev) int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) { - u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); MLX5_SET(enable_hca_in, in, function_id, func_id); MLX5_SET(enable_hca_in, in, embedded_cpu_function, dev->caps.embedded_cpu); - return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + return mlx5_cmd_exec_in(dev, enable_hca, in); } int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) { - u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0}; - u32 
in[MLX5_ST_SZ_DW(disable_hca_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); MLX5_SET(disable_hca_in, in, function_id, func_id); MLX5_SET(enable_hca_in, in, embedded_cpu_function, dev->caps.embedded_cpu); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, disable_hca, in); } u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, @@ -686,14 +691,13 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, static int mlx5_core_set_issi(struct mlx5_core_dev *dev) { - u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0}; - u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0}; + u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {}; + u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {}; u32 sup_issi; int err; MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); - err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), - query_out, sizeof(query_out)); + err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out); if (err) { u32 syndrome; u8 status; @@ -713,13 +717,11 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); if (sup_issi & (1 << 1)) { - u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0}; - u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0}; + u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {}; MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); MLX5_SET(set_issi_in, set_in, current_issi, 1); - err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), - set_out, sizeof(set_out)); + err = mlx5_cmd_exec_in(dev, set_issi, set_in); if (err) { mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n", err); @@ -836,8 +838,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) mlx5_cq_debugfs_init(dev); - mlx5_init_qp_table(dev); - mlx5_init_reserved_gids(dev); mlx5_init_clock(dev); @@ -896,7 +896,6 @@ err_rl_cleanup: err_tables_cleanup: mlx5_geneve_destroy(dev->geneve); mlx5_vxlan_destroy(dev->vxlan); - mlx5_cleanup_qp_table(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_events_cleanup(dev); err_eq_cleanup: @@ -924,7 +923,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_clock(dev); mlx5_cleanup_reserved_gids(dev); - mlx5_cleanup_qp_table(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_events_cleanup(dev); mlx5_eq_table_cleanup(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index ba2b09cc192f..e019d68062d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -33,34 +33,31 @@ #include #include #include -#include #include #include "mlx5_core.h" int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) { - u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {}; void *gid; MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG); MLX5_SET(attach_to_mcg_in, in, qpn, qpn); gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid); memcpy(gid, mgid, sizeof(*mgid)); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, attach_to_mcg, in); } EXPORT_SYMBOL(mlx5_core_attach_mcg); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) { - u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0}; + u32 
in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {}; void *gid; MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG); MLX5_SET(detach_from_mcg_in, in, qpn, qpn); gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid); memcpy(gid, mgid, sizeof(*mgid)); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, detach_from_mcg, in); } EXPORT_SYMBOL(mlx5_core_detach_mcg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 366f2cbfc6db..9eb51f06d3ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -33,14 +33,13 @@ #include #include #include -#include #include "mlx5_core.h" int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *in, int inlen) { - u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0}; + u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {}; u32 mkey_index; void *mkc; int err; @@ -66,19 +65,18 @@ EXPORT_SYMBOL(mlx5_core_create_mkey); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey) { - u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {}; MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, destroy_mkey, in); } EXPORT_SYMBOL(mlx5_core_destroy_mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {}; memset(out, 0, outlen); MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY); @@ -100,8 +98,8 @@ static inline u32 mlx5_get_psv(u32 *out, int psv_index) int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index) { - u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {}; + u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {}; int i, err; if (npsvs > MLX5_MAX_PSVS) @@ -111,7 +109,7 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, MLX5_SET(create_psv_in, in, pd, pdn); MLX5_SET(create_psv_in, in, num_psv, npsvs); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, create_psv, in, out); if (err) return err; @@ -124,11 +122,10 @@ EXPORT_SYMBOL(mlx5_core_create_psv); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) { - u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {}; MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV); MLX5_SET(destroy_psv_in, in, psvn, psv_num); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, destroy_psv, in); } EXPORT_SYMBOL(mlx5_core_destroy_psv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 91bd258ecf1b..3d6f617abb7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -35,7 +35,6 @@ #include #include #include -#include #include "mlx5_core.h" #include "lib/eq.h" @@ -136,8 +135,8 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) static int 
mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) { - u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {}; int err; MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES); @@ -146,7 +145,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES); MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev)); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, query_pages, in, out); if (err) return err; @@ -257,8 +256,7 @@ err_mapping: static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id, bool ec_function) { - u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {}; int err; MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); @@ -266,7 +264,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id, MLX5_SET(manage_pages_in, in, function_id, func_id); MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_in(dev, manage_pages, in); if (err) mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n", func_id, err); @@ -374,7 +372,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed, bool ec_function) { int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); - u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {}; int num_claimed; u32 *out; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c index bd830d8d6c5f..aabc53ad8bdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -33,17 +33,16 @@ #include #include #include -#include #include "mlx5_core.h" int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) { - u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; int err; MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out); if (!err) *pdn = MLX5_GET(alloc_pd_out, out, pd); return err; @@ -52,11 +51,10 @@ EXPORT_SYMBOL(mlx5_core_alloc_pd); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) { - u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {}; MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD); MLX5_SET(dealloc_pd_in, in, pd, pdn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, dealloc_pd, in); } EXPORT_SYMBOL(mlx5_core_dealloc_pd); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index cc262b30aed5..9f829e68fc73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -763,24 +763,23 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit); int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode) { - u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0}; - u32 
out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {}; MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL); MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1); MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, set_wol_rol, in); } EXPORT_SYMBOL_GPL(mlx5_set_port_wol); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) { - u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {}; int err; MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(mdev, query_wol_rol, in, out); if (!err) *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index f3b29d9ade1f..99039c47ef33 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -33,15 +33,14 @@ #include #include #include -#include #include "mlx5_core.h" /* Scheduling element fw management */ int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, void *ctx, u32 *element_id) { - u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {}; + u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {}; void *schedc; int err; @@ -53,7 +52,7 @@ int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, hierarchy); memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context)); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out); if (err) return err; @@ -66,8 +65,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, void *ctx, u32 element_id, u32 modify_bitmask) { - u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {}; void *schedc; schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in, @@ -82,14 +80,13 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, hierarchy); memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context)); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_scheduling_element, in); } int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 element_id) { - u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {}; MLX5_SET(destroy_scheduling_element_in, in, opcode, MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT); @@ -98,7 +95,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy, hierarchy); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in); } static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in, @@ -145,8 +142,7 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, 
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry, bool set) { - u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {}; - u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {}; + u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {}; void *pp_context; pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx); @@ -156,7 +152,7 @@ static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev, MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index); if (set) memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw)); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in); } bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 461b39376daf..6bd34b293007 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -18,7 +18,7 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev, MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport); MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out); if (err) return err; @@ -51,7 +51,7 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out); if (err) { kfree(out); return err; @@ -141,7 +141,7 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev, MLX5_SET(query_flow_table_in, in, table_type, type); MLX5_SET(query_flow_table_in, in, table_id, table_id); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out); if (err) return err; @@ -158,12 +158,11 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev, int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev) { - u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {}; u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {}; MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, sync_steering, in); } int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, @@ -214,14 +213,13 @@ int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, u32 table_type, u32 table_id) { - u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {}; u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {}; MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); MLX5_SET(delete_fte_in, in, table_type, table_type); MLX5_SET(delete_fte_in, in, table_id, table_id); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, delete_fte, in); } int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev, @@ -263,7 +261,6 @@ out: int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev, u32 modify_header_id) { - u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {}; u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {}; MLX5_SET(dealloc_modify_header_context_in, in, opcode, @@ -271,7 +268,7 @@ int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev, MLX5_SET(dealloc_modify_header_context_in, in, 
modify_header_id, modify_header_id); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in); } int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, @@ -292,7 +289,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, MLX5_SET(create_flow_group_in, in, table_type, table_type); MLX5_SET(create_flow_group_in, in, table_id, table_id); - err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out); if (err) goto out; @@ -309,14 +306,14 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev, u32 group_id) { u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {}; - u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {}; - MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET(destroy_flow_group_in, in, opcode, + MLX5_CMD_OP_DESTROY_FLOW_GROUP); MLX5_SET(destroy_flow_group_in, in, table_type, table_type); MLX5_SET(destroy_flow_group_in, in, table_id, table_id); MLX5_SET(destroy_flow_group_in, in, group_id, group_id); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, destroy_flow_group, in); } int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, @@ -360,7 +357,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en, attr->reformat_en); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out); if (err) return err; @@ -379,7 +376,6 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev, u32 table_id, u32 table_type) { - u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {}; u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {}; MLX5_SET(destroy_flow_table_in, in, opcode, @@ -387,7 +383,7 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev, MLX5_SET(destroy_flow_table_in, in, table_type, table_type); MLX5_SET(destroy_flow_table_in, in, table_id, table_id); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, destroy_flow_table, in); } int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev, @@ -434,7 +430,6 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev, void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev, u32 reformat_id) { - u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {}; u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {}; MLX5_SET(dealloc_packet_reformat_context_in, in, opcode, @@ -442,7 +437,7 @@ void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev, MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id, reformat_id); - mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in); } int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, @@ -458,7 +453,7 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, MLX5_SET(query_roce_address_in, in, roce_address_index, index); MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c index 30d2d7376f56..cc33515b9aba 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -95,13 +95,12 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev, } static struct mlx5dr_icm_mr * -dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool, - enum mlx5_sw_icm_type type, - size_t align_base) +dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) { struct mlx5_core_dev *mdev = pool->dmn->mdev; + enum mlx5_sw_icm_type dm_type; struct mlx5dr_icm_mr *icm_mr; - size_t align_diff; + size_t log_align_base; int err; icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL); @@ -111,14 +110,22 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool, icm_mr->pool = pool; INIT_LIST_HEAD(&icm_mr->mr_list); - icm_mr->dm.type = type; - - /* 2^log_biggest_table * entry-size * double-for-alignment */ icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, - pool->icm_type) * 2; + pool->icm_type); - err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0, - &icm_mr->dm.addr, &icm_mr->dm.obj_id); + if (pool->icm_type == DR_ICM_TYPE_STE) { + dm_type = MLX5_SW_ICM_TYPE_STEERING; + log_align_base = ilog2(icm_mr->dm.length); + } else { + dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY; + /* Align base is 64B */ + log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE); + } + icm_mr->dm.type = dm_type; + + err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, + log_align_base, 0, &icm_mr->dm.addr, + &icm_mr->dm.obj_id); if (err) { mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err); goto free_icm_mr; @@ -137,15 +144,18 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool, icm_mr->icm_start_addr = icm_mr->dm.addr; - /* align_base is always a power of 2 */ - align_diff = icm_mr->icm_start_addr & (align_base - 1); - if (align_diff) - icm_mr->used_length = align_base - align_diff; + if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) { + mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n", + log_align_base); + goto free_mkey; + } list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list); return icm_mr; +free_mkey: + mlx5_core_destroy_mkey(mdev, &icm_mr->mkey); free_dm: mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0, icm_mr->dm.addr, icm_mr->dm.obj_id); @@ -200,24 +210,11 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket) struct mlx5dr_icm_pool *pool = bucket->pool; struct mlx5dr_icm_mr *icm_mr = NULL; struct mlx5dr_icm_chunk *chunk; - enum mlx5_sw_icm_type dm_type; - size_t align_base; int i, err = 0; mr_req_size = bucket->num_of_entries * bucket->entry_size; mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, pool->icm_type); - - if (pool->icm_type == DR_ICM_TYPE_STE) { - dm_type = MLX5_SW_ICM_TYPE_STEERING; - /* Align base is the biggest chunk size / row size */ - align_base = mr_row_size; - } else { - dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY; - /* Align base is 64B */ - align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE; - } - mutex_lock(&pool->mr_mutex); if (!list_empty(&pool->icm_mr_list)) { icm_mr = list_last_entry(&pool->icm_mr_list, @@ -228,7 +225,7 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket) } if (!icm_mr || mr_free_size < mr_row_size) { - icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base); + icm_mr = dr_icm_pool_mr_create(pool); if (!icm_mr) { err = -ENOMEM; goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 
index c0ab9cf74929..c4ed25bb9ac8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -100,14 +100,10 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne) return err == CQ_POLL_ERR ? err : npolled; } -static void dr_qp_event(struct mlx5_core_qp *mqp, int event) -{ - pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn); -} - static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, struct dr_qp_init_attr *attr) { + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {}; struct mlx5_wq_param wqp; struct mlx5dr_qp *dr_qp; @@ -180,14 +176,12 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); - err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen); + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn); kfree(in); - - if (err) { - mlx5_core_warn(mdev, " Can't create QP\n"); + if (err) goto err_in; - } - dr_qp->mqp.event = dr_qp_event; dr_qp->uar = attr->uar; return dr_qp; @@ -204,7 +198,12 @@ err_wq: static void dr_destroy_qp(struct mlx5_core_dev *mdev, struct mlx5dr_qp *dr_qp) { - mlx5_core_destroy_qp(mdev, &dr_qp->mqp); + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {}; + + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn); + mlx5_cmd_exec_in(mdev, destroy_qp, in); + kfree(dr_qp->sq.wqe_head); mlx5_wq_destroy(&dr_qp->wq_ctrl); kfree(dr_qp); @@ -242,7 +241,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, MLX5_WQE_CTRL_CQ_UPDATE : 0; wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) | opcode); - wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8); + wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8); wq_raddr = (void *)(wq_ctrl + 1); wq_raddr->raddr = cpu_to_be64(remote_addr); wq_raddr->rkey = cpu_to_be32(rkey); @@ -585,8 +584,10 @@ static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, rre, 1); MLX5_SET(qpc, qpc, rwe, 1); - return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, - &dr_qp->mqp); + MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP); + MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn); + + return mlx5_cmd_exec_in(mdev, rst2init_qp, in); } static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev, @@ -598,14 +599,15 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev, qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc); - MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn); + MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn); - MLX5_SET(qpc, qpc, log_ack_req_freq, 0); MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry); - return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc, - &dr_qp->mqp); + MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP); + MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn); + + return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in); } static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, @@ -617,7 +619,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc); - MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn); + MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn); MLX5_SET(qpc, qpc, mtu, attr->mtu); MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1); @@ -636,8 +638,10 @@ static int 
dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num); MLX5_SET(qpc, qpc, min_rnr_nak, 1); - return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, - &dr_qp->mqp); + MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP); + MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn); + + return mlx5_cmd_exec_in(mdev, init2rtr_qp, in); } static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) @@ -663,7 +667,7 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) return ret; rtr_attr.mtu = mtu; - rtr_attr.qp_num = dr_qp->mqp.qpn; + rtr_attr.qp_num = dr_qp->qpn; rtr_attr.min_rnr_timer = 12; rtr_attr.port_num = port; rtr_attr.sgid_index = gid_index; @@ -689,12 +693,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) return 0; } -static void dr_cq_event(struct mlx5_core_cq *mcq, - enum mlx5_event event) -{ - pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn); -} - static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, struct mlx5_uars_page *uar, size_t ncqe) @@ -755,8 +753,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); - cq->mcq.event = dr_cq_event; - err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); kvfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 3fa739951b34..984783238baa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -990,7 +990,7 @@ struct mlx5dr_qp { struct mlx5_wq_qp wq; struct mlx5_uars_page *uar; struct mlx5_wq_ctrl wq_ctrl; - struct mlx5_core_qp mqp; + u32 qpn; struct { unsigned int pc; unsigned int cc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 3b3f5b9d4f95..8887b2440c7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -576,7 +576,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, struct mlx5dr_action *action; size_t actions_sz; - actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * + actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions; action = mlx5dr_action_create_modify_header(dr_domain, 0, actions_sz, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index b1068500f1df..01cc00ad8acf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -36,14 +36,14 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn) { - u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {}; int err; MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out); if (!err) *tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain); @@ -54,19 +54,18 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain); void 
mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn) { - u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {}; MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN); MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, dealloc_transport_domain, in); } EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) { - u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {}; int err; MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ); @@ -78,44 +77,39 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) } EXPORT_SYMBOL(mlx5_core_create_rq); -int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in) { - u32 out[MLX5_ST_SZ_DW(modify_rq_out)]; - MLX5_SET(modify_rq_in, in, rqn, rqn); MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_rq, in); } EXPORT_SYMBOL(mlx5_core_modify_rq); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) { - u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {}; MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, destroy_rq, in); } EXPORT_SYMBOL(mlx5_core_destroy_rq); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0}; - int outlen = MLX5_ST_SZ_BYTES(query_rq_out); + u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {}; MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ); MLX5_SET(query_rq_in, in, rqn, rqn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec_inout(dev, query_rq, in, out); } EXPORT_SYMBOL(mlx5_core_query_rq); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) { - u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {}; int err; MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); @@ -126,34 +120,30 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) return err; } -int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in) { - u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; - MLX5_SET(modify_sq_in, in, sqn, sqn); MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); - return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_sq, in); } EXPORT_SYMBOL(mlx5_core_modify_sq); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) { - u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {}; MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); MLX5_SET(destroy_sq_in, in, sqn, sqn); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, destroy_sq, in); } int mlx5_core_query_sq(struct 
mlx5_core_dev *dev, u32 sqn, u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0}; - int outlen = MLX5_ST_SZ_BYTES(query_sq_out); + u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {}; MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ); MLX5_SET(query_sq_in, in, sqn, sqn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec_inout(dev, query_sq, in, out); } EXPORT_SYMBOL(mlx5_core_query_sq); @@ -182,24 +172,13 @@ out: } EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state); -int mlx5_core_create_tir_out(struct mlx5_core_dev *dev, - u32 *in, int inlen, - u32 *out, int outlen) -{ - MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); - - return mlx5_cmd_exec(dev, in, inlen, out, outlen); -} -EXPORT_SYMBOL(mlx5_core_create_tir_out); - -int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tirn) +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn) { u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; int err; - err = mlx5_core_create_tir_out(dev, in, inlen, - out, sizeof(out)); + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); + err = mlx5_cmd_exec_inout(dev, create_tir, in, out); if (!err) *tirn = MLX5_GET(create_tir_out, out, tirn); @@ -207,35 +186,30 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, } EXPORT_SYMBOL(mlx5_core_create_tir); -int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, - int inlen) +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in) { - u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0}; - MLX5_SET(modify_tir_in, in, tirn, tirn); MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR); - return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_tir, in); } void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) { - u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {}; MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR); MLX5_SET(destroy_tir_in, in, tirn, tirn); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, destroy_tir, in); } EXPORT_SYMBOL(mlx5_core_destroy_tir); -int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tisn) +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn) { - u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {}; int err; MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, create_tis, in, out); if (!err) *tisn = MLX5_GET(create_tis_out, out, tisn); @@ -243,33 +217,29 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, } EXPORT_SYMBOL(mlx5_core_create_tis); -int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, - int inlen) +int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in) { - u32 out[MLX5_ST_SZ_DW(modify_tis_out)] = {0}; - MLX5_SET(modify_tis_in, in, tisn, tisn); MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS); - return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec_in(dev, modify_tis, in); } EXPORT_SYMBOL(mlx5_core_modify_tis); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) { - u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {}; MLX5_SET(destroy_tis_in, in, opcode, 
MLX5_CMD_OP_DESTROY_TIS); MLX5_SET(destroy_tis_in, in, tisn, tisn); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, destroy_tis, in); } EXPORT_SYMBOL(mlx5_core_destroy_tis); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn) { - u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {}; int err; MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); @@ -284,7 +254,7 @@ EXPORT_SYMBOL(mlx5_core_create_rqt); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {}; MLX5_SET(modify_rqt_in, in, rqtn, rqtn); MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT); @@ -293,12 +263,11 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) { - u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {}; MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); MLX5_SET(destroy_rqt_in, in, rqtn, rqtn); - mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec_in(dev, destroy_rqt, in); } EXPORT_SYMBOL(mlx5_core_destroy_rqt); @@ -383,7 +352,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn, int curr_state, int next_state, u16 peer_vhca, u32 peer_sq) { - u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {}; void *rqc; rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); @@ -396,8 +365,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn, MLX5_SET(modify_rq_in, in, rq_state, curr_state); MLX5_SET(rqc, rqc, state, next_state); - return mlx5_core_modify_rq(func_mdev, rqn, - in, MLX5_ST_SZ_BYTES(modify_rq_in)); + return mlx5_core_modify_rq(func_mdev, rqn, in); } static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, @@ -417,8 +385,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); - return mlx5_core_modify_sq(peer_mdev, sqn, - in, MLX5_ST_SZ_BYTES(modify_sq_in)); + return mlx5_core_modify_sq(peer_mdev, sqn, in); } static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 0d006224d7b0..da481a7c12f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -34,17 +34,16 @@ #include #include #include -#include #include "mlx5_core.h" int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) { - u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {}; int err; MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out); if (!err) *uarn = MLX5_GET(alloc_uar_out, out, uar); return err; @@ -53,12 +52,11 @@ EXPORT_SYMBOL(mlx5_cmd_alloc_uar); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) { - u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {}; MLX5_SET(dealloc_uar_in, in, opcode, 
MLX5_CMD_OP_DEALLOC_UAR); MLX5_SET(dealloc_uar_in, in, uar, uarn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(dev, dealloc_uar, in); } EXPORT_SYMBOL(mlx5_cmd_free_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 23f879da9104..c107d92dc118 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -40,10 +40,11 @@ /* Mutex to hold while enabling or disabling RoCE */ static DEFINE_MUTEX(mlx5_roce_en_lock); -static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, - u16 vport, u32 *out, int outlen) +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) { - u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {}; + int err; MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE); @@ -52,14 +53,9 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, if (vport) MLX5_SET(query_vport_state_in, in, other_vport, 1); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); -} - -u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) -{ - u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0}; - - _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out)); + err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out); + if (err) + return 0; return MLX5_GET(query_vport_state_out, out, state); } @@ -67,8 +63,7 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 other_vport, u8 state) { - u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {}; MLX5_SET(modify_vport_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VPORT_STATE); @@ -77,13 +72,13 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, MLX5_SET(modify_vport_state_in, in, other_vport, other_vport); MLX5_SET(modify_vport_state_in, in, admin_state, state); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_cmd_exec_in(mdev, modify_vport_state, in); } static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, - u32 *out, int outlen) + u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {}; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); @@ -91,26 +86,16 @@ static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, if (vport) MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); -} - -static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, - int inlen) -{ - u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0}; - - MLX5_SET(modify_nic_vport_context_in, in, opcode, - MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out); } int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 *min_inline) { - u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {}; int err; - err = 
mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out)); + err = mlx5_query_nic_vport_context(mdev, vport, out); if (!err) *min_inline = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.min_wqe_inline_mode); @@ -139,8 +124,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline) { - u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0}; - int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {}; void *nic_vport_ctx; MLX5_SET(modify_nic_vport_context_in, in, @@ -152,23 +136,20 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, in, nic_vport_context); MLX5_SET(nic_vport_context, nic_vport_ctx, min_wqe_inline_mode, min_inline); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - return mlx5_modify_nic_vport_context(mdev, in, inlen); + return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in); } int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, bool other, u8 *addr) { - int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {}; u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {}; u8 *out_addr; - u32 *out; int err; - out = kvzalloc(outlen, GFP_KERNEL); - if (!out) - return -ENOMEM; - out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out, nic_vport_context.permanent_address); @@ -177,11 +158,10 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); MLX5_SET(query_nic_vport_context_in, in, other_vport, other); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); + err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out); if (!err) ether_addr_copy(addr, &out_addr[2]); - kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address); @@ -216,8 +196,10 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, permanent_address); ether_addr_copy(&perm_mac[2], addr); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = mlx5_modify_nic_vport_context(mdev, in, inlen); + err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in); kvfree(in); @@ -235,7 +217,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); + err = mlx5_query_nic_vport_context(mdev, 0, out); if (!err) *mtu = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.mtu); @@ -257,8 +239,10 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = mlx5_modify_nic_vport_context(mdev, in, inlen); + err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in); kvfree(in); return err; @@ -292,7 +276,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, req_list_size = max_list_size; } - out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); out = kzalloc(out_sz, GFP_KERNEL); @@ -332,7 +316,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, u8 addr_list[][ETH_ALEN], int list_size) { - 
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {}; void *nic_vport_ctx; int max_list_size; int in_sz; @@ -350,7 +334,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + list_size * MLX5_ST_SZ_BYTES(mac_address_layout); - memset(out, 0, sizeof(out)); in = kzalloc(in_sz, GFP_KERNEL); if (!in) return -ENOMEM; @@ -442,7 +425,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out, outlen); + mlx5_query_nic_vport_context(mdev, 0, out); *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.system_image_guid); @@ -462,7 +445,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out, outlen); + mlx5_query_nic_vport_context(mdev, 0, out); *node_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.node_guid); @@ -498,8 +481,10 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context); MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = mlx5_modify_nic_vport_context(mdev, in, inlen); + err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in); kvfree(in); @@ -516,7 +501,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out, outlen); + mlx5_query_nic_vport_context(mdev, 0, out); *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.qkey_violation_counter); @@ -664,7 +649,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, struct mlx5_hca_vport_context *rep) { int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); - int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0}; + int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {}; int is_group_manager; void *out; void *ctx; @@ -691,7 +676,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, if (MLX5_CAP_GEN(dev, num_ports) == 2) MLX5_SET(query_hca_vport_context_in, in, port_num, port_num); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out); if (err) goto ex; @@ -788,7 +773,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, vport, out, outlen); + err = mlx5_query_nic_vport_context(mdev, vport, out); if (err) goto out; @@ -825,8 +810,10 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, nic_vport_context.promisc_mc, promisc_mc); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.promisc_all, promisc_all); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = mlx5_modify_nic_vport_context(mdev, in, inlen); + err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in); kvfree(in); @@ -865,8 +852,10 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable) if (MLX5_CAP_GEN(mdev, disable_local_lb_uc)) MLX5_SET(modify_nic_vport_context_in, in, field_select.disable_uc_local_lb, 1); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = 
mlx5_modify_nic_vport_context(mdev, in, inlen); + err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in); if (!err) mlx5_core_dbg(mdev, "%s local_lb\n", @@ -888,7 +877,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status) if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); + err = mlx5_query_nic_vport_context(mdev, 0, out); if (err) goto out; @@ -925,8 +914,10 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev, MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en, state); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = mlx5_modify_nic_vport_context(mdev, in, inlen); + err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in); kvfree(in); @@ -965,16 +956,15 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev) mutex_unlock(&mlx5_roce_en_lock); return err; } -EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce); +EXPORT_SYMBOL(mlx5_nic_vport_disable_roce); int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, - int vf, u8 port_num, void *out, - size_t out_sz) + int vf, u8 port_num, void *out) { - int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in); - int is_group_manager; - void *in; - int err; + int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in); + int is_group_manager; + void *in; + int err; is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); in = kvzalloc(in_sz, GFP_KERNEL); @@ -997,7 +987,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, if (MLX5_CAP_GEN(dev, num_ports) == 2) MLX5_SET(query_vport_counter_in, in, port_num, port_num); - err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out); free: kvfree(in); return err; @@ -1008,8 +998,8 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, u8 other_vport, u64 *rx_discard_vport_down, u64 *tx_discard_vport_down) { - u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; int err; MLX5_SET(query_vnic_env_in, in, opcode, @@ -1018,7 +1008,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, MLX5_SET(query_vnic_env_in, in, vport_number, vport); MLX5_SET(query_vnic_env_in, in, other_vport, other_vport); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out); if (err) return err; @@ -1035,11 +1025,10 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, struct mlx5_hca_vport_context *req) { int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in); - u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)]; int is_group_manager; + void *ctx; void *in; int err; - void *ctx; mlx5_core_dbg(dev, "vf %d\n", vf); is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); @@ -1047,7 +1036,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, if (!in) return -ENOMEM; - memset(out, 0, sizeof(out)); MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT); if (other_vport) { if (is_group_manager) { @@ -1074,7 +1062,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1); MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, 
req->cap_mask1_perm); - err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); + err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in); ex: kfree(in); return err; @@ -1103,8 +1091,10 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.affiliation_criteria, MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria)); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = mlx5_modify_nic_vport_context(port_mdev, in, inlen); + err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in); if (err) mlx5_nic_vport_disable_roce(port_mdev); @@ -1129,8 +1119,10 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev) nic_vport_context.affiliated_vhca_id, 0); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.affiliation_criteria, 0); + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - err = mlx5_modify_nic_vport_context(port_mdev, in, inlen); + err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in); if (!err) mlx5_nic_vport_disable_roce(port_mdev); @@ -1170,4 +1162,4 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) { return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev); } -EXPORT_SYMBOL(mlx5_eswitch_get_total_vports); +EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h deleted file mode 100644 index 68cd08f02c2f..000000000000 --- a/include/linux/mlx5/cmd.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef MLX5_CMD_H -#define MLX5_CMD_H - -#include - -struct manage_pages_layout { - u64 ptr; - u32 reserved; - u16 num_entries; - u16 func_id; -}; - - -struct mlx5_cmd_alloc_uar_imm_out { - u32 rsvd[3]; - u32 uarn; -}; - -#endif /* MLX5_CMD_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 40748fc1b11b..b5a9399e07ee 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -188,7 +188,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *out, int outlen); + u32 *out); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen); int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 2b90097a6cf9..1bc27aca648b 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -364,6 +364,7 @@ enum { enum { MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5, + MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8, }; enum { @@ -449,10 +450,12 @@ enum { enum { MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, + MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2, }; enum { MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, + MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2, }; enum { @@ -689,6 +692,19 @@ struct mlx5_eqe_temp_warning { __be64 sensor_warning_lsb; } __packed; +#define SYNC_RST_STATE_MASK 0xf + +enum sync_rst_state_type { + MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0, + MLX5_SYNC_RST_STATE_RESET_NOW = 0x1, + MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2, +}; + +struct mlx5_eqe_sync_fw_update { + u8 reserved_at_0[3]; + u8 sync_rst_state; +}; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -707,6 +723,7 @@ union ev_data { struct mlx5_eqe_dct dct; struct mlx5_eqe_temp_warning temp_warning; struct mlx5_eqe_xrq_err xrq_err; + struct mlx5_eqe_sync_fw_update sync_fw_update; } __packed; struct mlx5_eqe { @@ -749,7 +766,7 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 outer_l3_tunneled; + u8 tls_outer_l3_tunneled; u8 rsvd0; __be16 wqe_id; u8 lro_tcppsh_abort_dupack; @@ -767,7 +784,12 @@ struct mlx5_cqe64 { u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ - __be32 imm_inval_pkey; + union { + __be32 immediate; + __be32 inval_rkey; + __be32 pkey; + __be32 ft_metadata; + }; u8 rsvd40[4]; __be32 byte_cnt; __be32 timestamp_h; @@ -834,7 +856,12 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) { - return cqe->outer_l3_tunneled & 0x1; + return cqe->tls_outer_l3_tunneled & 0x1; +} + +static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe) +{ + return (cqe->tls_outer_l3_tunneled >> 3) & 0x3; } static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) @@ -922,6 +949,13 @@ enum { CQE_L4_OK = 1 << 2, }; +enum { + CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0, + CQE_TLS_OFFLOAD_DECRYPTED = 0x1, + CQE_TLS_OFFLOAD_RESYNC = 0x2, + CQE_TLS_OFFLOAD_ERROR = 0x3, +}; + struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; @@ -1107,6 +1141,7 @@ enum mlx5_cap_type { MLX5_CAP_TLS, MLX5_CAP_VDPA_EMULATION = 0x13, MLX5_CAP_DEV_EVENT = 0x14, + MLX5_CAP_IPSEC, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1324,6 +1359,9 @@ enum mlx5_qcam_feature_groups { 
MLX5_GET64(device_virtio_emulation_cap, \ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) +#define MLX5_CAP_IPSEC(mdev, cap)\ + MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6f8f79ef829b..d82dbbab8179 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -130,6 +130,7 @@ enum { MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, + MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, @@ -541,7 +542,6 @@ struct mlx5_priv { struct mlx5_core_health health; /* start: qp staff */ - struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; struct dentry *eq_debugfs; struct dentry *cq_debugfs; @@ -687,7 +687,6 @@ struct mlx5_core_dev { unsigned long intf_state; struct mlx5_priv priv; struct mlx5_profile *profile; - atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_dm *dm; @@ -903,6 +902,19 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); + +#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \ + ({ \ + mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \ + MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \ + }) + +#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \ + ({ \ + u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \ + mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \ + }) + int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); @@ -1069,7 +1081,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, - u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id); + u64 length, u32 log_alignment, u16 uid, + phys_addr_t *addr, u32 *obj_id); int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t addr, u32 obj_id); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 69b27c7dfc3e..fb243848132d 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -74,6 +74,7 @@ enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, + MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4, }; enum { @@ -885,7 +886,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 tunnel_stateless_vxlan_gpe[0x1]; u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; - u8 reserved_at_2a[0x6]; + u8 insert_trailer[0x1]; + u8 reserved_at_2b[0x5]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -903,7 +905,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; - u8 reserved_at_1[0x1f]; + u8 reserved_at_1[0x3]; + u8 sw_r_roce_src_udp_port[0x1]; + u8 reserved_at_5[0x1b]; u8 reserved_at_20[0x60]; @@ -1097,6 +1101,23 @@ struct mlx5_ifc_tls_cap_bits { u8 reserved_at_20[0x7e0]; }; +struct mlx5_ifc_ipsec_cap_bits { + u8 ipsec_full_offload[0x1]; + u8 ipsec_crypto_offload[0x1]; + u8 ipsec_esn[0x1]; + u8 
ipsec_crypto_esp_aes_gcm_256_encrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1]; + u8 reserved_at_7[0x4]; + u8 log_max_ipsec_offload[0x5]; + u8 reserved_at_10[0x10]; + + u8 min_log_ipsec_full_replay_window[0x8]; + u8 max_log_ipsec_full_replay_window[0x8]; + u8 reserved_at_30[0x7d0]; +}; + enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, @@ -1223,7 +1244,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_130[0xa]; u8 log_max_ra_res_dc[0x6]; - u8 reserved_at_140[0x9]; + u8 reserved_at_140[0x6]; + u8 release_all_pages[0x1]; + u8 reserved_at_147[0x2]; u8 roce_accl[0x1]; u8 log_max_ra_req_qp[0x6]; u8 reserved_at_150[0xa]; @@ -1296,7 +1319,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_at_1f0[0xc]; + u8 reserved_at_1f0[0x1]; + u8 pci_sync_for_fw_update_event[0x1]; + u8 reserved_at_1f2[0xa]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; @@ -1461,13 +1486,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_460[0x3]; u8 log_max_uctx[0x5]; - u8 reserved_at_468[0x3]; + u8 reserved_at_468[0x2]; + u8 ipsec_offload[0x1]; u8 log_max_umem[0x5]; u8 max_num_eqs[0x10]; u8 reserved_at_480[0x1]; u8 tls_tx[0x1]; - u8 reserved_at_482[0x1]; + u8 tls_rx[0x1]; u8 log_max_l2_table[0x5]; u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; @@ -3112,7 +3138,8 @@ struct mlx5_ifc_tirc_bits { u8 reserved_at_0[0x20]; u8 disp_type[0x4]; - u8 reserved_at_24[0x1c]; + u8 tls_en[0x1]; + u8 reserved_at_25[0x1b]; u8 reserved_at_40[0x40]; @@ -4140,7 +4167,8 @@ enum { MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, - MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_IPSEC_OBJ_ID = 0x4 }; struct mlx5_ifc_set_fte_out_bits { @@ -5667,9 +5695,9 @@ struct mlx5_ifc_copy_action_in_bits { u8 reserved_at_38[0x8]; }; -union mlx5_ifc_set_action_in_add_action_in_auto_bits { - struct mlx5_ifc_set_action_in_bits set_action_in; - struct mlx5_ifc_add_action_in_bits add_action_in; +union mlx5_ifc_set_add_copy_action_in_auto_bits { + struct mlx5_ifc_set_action_in_bits set_action_in; + struct mlx5_ifc_add_action_in_bits add_action_in; struct mlx5_ifc_copy_action_in_bits copy_action_in; u8 reserved_at_0[0x40]; }; @@ -5743,7 +5771,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits { u8 reserved_at_68[0x10]; u8 num_of_actions[0x8]; - union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0]; + union mlx5_ifc_set_add_copy_action_in_auto_bits actions[0]; }; struct mlx5_ifc_dealloc_modify_header_context_out_bits { @@ -9680,6 +9708,29 @@ struct mlx5_ifc_mcda_reg_bits { u8 data[0][0x20]; }; +enum { + MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0), + MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1), +}; + +enum { + MLX5_MFRL_REG_RESET_LEVEL0 = BIT(0), + MLX5_MFRL_REG_RESET_LEVEL3 = BIT(3), + MLX5_MFRL_REG_RESET_LEVEL6 = BIT(6), +}; + +struct mlx5_ifc_mfrl_reg_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x2]; + u8 pci_sync_for_fw_update_start[0x1]; + u8 pci_sync_for_fw_update_resp[0x2]; + u8 rst_type_sel[0x3]; + u8 reserved_at_28[0x8]; + u8 reset_type[0x8]; + u8 reset_level[0x8]; +}; + struct mlx5_ifc_mirc_reg_bits { u8 reserved_at_0[0x18]; u8 status_code[0x8]; @@ -9743,6 +9794,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mcc_reg_bits 
mcc_reg; struct mlx5_ifc_mcda_reg_bits mcda_reg; struct mlx5_ifc_mirc_reg_bits mirc_reg; + struct mlx5_ifc_mfrl_reg_bits mfrl_reg; u8 reserved_at_0[0x60e0]; }; @@ -10465,10 +10517,62 @@ struct mlx5_ifc_affiliated_event_header_bits { enum { MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13), }; enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, + MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, +}; + +enum { + MLX5_IPSEC_OBJECT_ICV_LEN_16B, + MLX5_IPSEC_OBJECT_ICV_LEN_12B, + MLX5_IPSEC_OBJECT_ICV_LEN_8B, +}; + +struct mlx5_ifc_ipsec_obj_bits { + u8 modify_field_select[0x40]; + u8 full_offload[0x1]; + u8 reserved_at_41[0x1]; + u8 esn_en[0x1]; + u8 esn_overlap[0x1]; + u8 reserved_at_44[0x2]; + u8 icv_length[0x2]; + u8 reserved_at_48[0x4]; + u8 aso_return_reg[0x4]; + u8 reserved_at_50[0x10]; + + u8 esn_msb[0x20]; + + u8 reserved_at_80[0x8]; + u8 dekn[0x18]; + + u8 salt[0x20]; + + u8 implicit_iv[0x40]; + + u8 reserved_at_100[0x700]; +}; + +struct mlx5_ifc_create_ipsec_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; +}; + +enum { + MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = BIT(0), + MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = BIT(1), +}; + +struct mlx5_ifc_query_ipsec_obj_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; +}; + +struct mlx5_ifc_modify_ipsec_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; }; struct mlx5_ifc_encryption_key_obj_bits { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index ae63b1ae9004..f23eb18526fe 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -229,6 +229,11 @@ enum { enum { MLX5_ETH_WQE_SVLAN = 1 << 0, + MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26, + MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27, + MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26, + MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28, + MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30, MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, }; @@ -257,6 +262,7 @@ struct mlx5_wqe_eth_seg { __be16 type; __be16 vlan_tci; } insert; + __be32 trailer; }; }; @@ -553,57 +559,8 @@ struct mlx5_qp_context { u8 rsvd1[24]; }; -static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) -{ - return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); -} - -int mlx5_core_create_dct(struct mlx5_core_dev *dev, - struct mlx5_core_dct *qp, - u32 *in, int inlen, - u32 *out, int outlen); -int mlx5_core_create_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp, - u32 *in, - int inlen); -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, - u32 opt_param_mask, void *qpc, - struct mlx5_core_qp *qp); -int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp); -int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, - struct mlx5_core_dct *dct); -int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - u32 *out, int outlen); -int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, - u32 *out, int outlen); - -int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, - u32 timeout_usec); - -int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); -int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); -void mlx5_init_qp_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_qp_table(struct mlx5_core_dev 
*dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); -int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, - struct mlx5_core_qp *rq); -void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, - struct mlx5_core_qp *rq); -int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, - struct mlx5_core_qp *sq); -void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, - struct mlx5_core_qp *sq); -int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); -int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); -int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, - int reset, void *out, int out_size); - -struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, - int res_num, - enum mlx5_res_type res_type); -void mlx5_core_res_put(struct mlx5_core_rsc_common *res); static inline const char *mlx5_qp_type_str(int type) { diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index dc6b1e7cb8c4..028f442530cf 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -39,27 +39,20 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn); -int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn); -int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); -int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tirn); -int mlx5_core_create_tir_out(struct mlx5_core_dev *dev, - u32 *in, int inlen, - u32 *out, int outlen); -int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, - int inlen); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn); +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); -int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tisn); -int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, - int inlen); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn); +int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 16060fb9b5e5..8170da1e9f70 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -127,8 +127,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, u8 other_vport, u64 *rx_discard_vport_down, u64 *tx_discard_vport_down); int 
mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, - int vf, u8 port_num, void *out, - size_t out_sz); + int vf, u8 port_num, void *out); int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf,
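Note (illustrative sketch, not part of the diff): the driver.h hunk above adds the mlx5_cmd_exec_in() and mlx5_cmd_exec_inout() wrappers, which derive the input/output mailbox sizes from the named mlx5_ifc command layout via MLX5_ST_SZ_BYTES(), so callers stop passing inlen/outlen and stop declaring throw-away out[] arrays. A minimal example of the resulting call pattern, mirroring the destroy_tir conversion in the transobj.c hunk above:

void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);

	/* Command with no interesting output: mlx5_cmd_exec_in() declares a
	 * correctly sized _out[] on the stack inside the macro.
	 */
	mlx5_cmd_exec_in(dev, destroy_tir, in);
}

Commands that do return data (create_tis, query_rq, alloc_uar, query_vport_counter in the hunks above) instead use mlx5_cmd_exec_inout(dev, <cmd>, in, out) with a caller-provided out[MLX5_ST_SZ_DW(<cmd>_out)] buffer and read the result with MLX5_GET().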