mlx5-updates-2021-08-11

Misc. cleanup for mlx5.

1) Typos and use of netdev_warn()
2) smatch cleanup
3) Minor fix to inner TTC table creation
4) Dynamic capability cache allocation

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmEUE4oACgkQSD+KveBX
+j7EsAf/W8hfoRYNeqf+xGwkOOUzMvSK8gNPD7f02yniqQ4MmC++v7Fg4M162uvG
d87EoCNjbGLqhAe/4W8Euis+4/s6o3EAjjjdck5B/ZKEoyY/W6PJYJPDNL358yaK
DsKw6oZrs02enrRbQN5/WzEjfaDSpKboD5eYqsl+g4YWu0wJt5bsDb5qj7UlGqee
FIIqLeKgsbqWmpQxpBRb6jzKLB0LxN1Kk2ymy7tyq+CCEWUvtlAY+509pWn/MOyX
G0rCg1Pz125ZvganFZaEDK4bKy2yAu0FmKv6CoC/LkNdLVWCRuQ1n1zR2B8Tt85U
SXEcRzenTSpDQ9X4a78dB5wpdG5Y3w==
=8u8S
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2021-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 updates 2021-08-11

This series provides misc updates to mlx5.
For more information please see tag log below.

Please pull and let me know if there is any problem.

mlx5-updates-2021-08-11

Misc. cleanup for mlx5.

1) Typos and use of netdev_warn()
2) smatch cleanup
3) Minor fix to inner TTC table creation
4) Dynamic capability cache allocation
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
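For readers skimming the diff below, here is a rough, standalone sketch of the storage change behind item 4 (dynamic capability cache allocation). The sizes, the used_types[] subset and everything other than the mlx5 names quoted from the diff are invented for illustration; this is plain C, not the driver code.

#include <stdio.h>
#include <stdlib.h>

#define CAP_NUM      20   /* stand-in for MLX5_CAP_NUM */
#define CAP_UNION_DW 64   /* stand-in for MLX5_UN_SZ_DW(hca_cap_union) */

/* Before: every capability type reserves cur+max storage inside the
 * device structure, whether or not the device ever queries it.
 */
struct dev_before {
	unsigned int hca_cur[CAP_NUM][CAP_UNION_DW];
	unsigned int hca_max[CAP_NUM][CAP_UNION_DW];
};

/* After: one small struct per capability type, allocated only for the
 * types listed in a static table (types[] in the main.c hunk below).
 */
struct hca_cap {
	unsigned int cur[CAP_UNION_DW];
	unsigned int max[CAP_UNION_DW];
};

struct dev_after {
	struct hca_cap *hca[CAP_NUM];
};

static const int used_types[] = { 0, 1, 4, 7 };	/* hypothetical subset */

static int caps_alloc(struct dev_after *dev)
{
	size_t i;

	for (i = 0; i < sizeof(used_types) / sizeof(used_types[0]); i++) {
		dev->hca[used_types[i]] = calloc(1, sizeof(struct hca_cap));
		if (!dev->hca[used_types[i]])
			return -1;
	}
	return 0;
}

static void caps_free(struct dev_after *dev)
{
	size_t i;

	for (i = 0; i < sizeof(used_types) / sizeof(used_types[0]); i++)
		free(dev->hca[used_types[i]]);	/* free(NULL) is a no-op */
}

int main(void)
{
	struct dev_after dev = { 0 };

	printf("embedded arrays: %zu bytes per device\n", sizeof(struct dev_before));
	if (caps_alloc(&dev) == 0)
		printf("per-type allocation: %zu bytes per used type\n", sizeof(struct hca_cap));
	caps_free(&dev);
	return 0;
}

The per-type pointers only receive memory for the capability types listed in the driver's types[] table, which is why the series also adds a comment to enum mlx5_cap_type reminding authors to update mlx5_hca_caps_alloc() when a new capability type is added.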
@@ -877,7 +877,7 @@ static void cb_timeout_handler(struct work_struct *work)
 	ent->ret = -ETIMEDOUT;
 	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
 		       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
-	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

 out:
 	cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
@@ -994,7 +994,7 @@ static void cmd_work_handler(struct work_struct *work)
 		MLX5_SET(mbox_out, ent->out, status, status);
 		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
 		return;
 	}

@@ -1008,7 +1008,7 @@ static void cmd_work_handler(struct work_struct *work)
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
+		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
 	}
 }

@@ -1068,7 +1068,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
 		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));

 	ent->ret = -ETIMEDOUT;
-	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
 }

 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
@@ -520,7 +520,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	e->out_dev = attr.out_dev;
 	e->route_dev_ifindex = attr.route_dev->ifindex;

-	/* It's importent to add the neigh to the hash table before checking
+	/* It's important to add the neigh to the hash table before checking
 	 * the neigh validity state. So if we'll get a notification, in case the
 	 * neigh changes it's validity state, we would find the relevant neigh
 	 * in the hash.
@@ -126,7 +126,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
 	/* Create a separate SQ, so that when the buff pool is disabled, we could
 	 * close this SQ safely and stop receiving CQEs. In other case, e.g., if
 	 * the XDPSQ was used instead, we might run into trouble when the buff pool
-	 * is disabled and then reenabled, but the SQ continues receiving CQEs
+	 * is disabled and then re-enabled, but the SQ continues receiving CQEs
 	 * from the old buff pool.
 	 */
 	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
@@ -33,7 +33,7 @@
 #include "en.h"

 /* mlx5e global resources should be placed in this file.
- * Global resources are common to all the netdevices crated on the same nic.
+ * Global resources are common to all the netdevices created on the same nic.
 */

 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
@@ -1255,7 +1255,8 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
 		return 0;

 	mlx5e_set_inner_ttc_params(priv, &ttc_params);
-	priv->fs.inner_ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+	priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
+							 &ttc_params);
 	if (IS_ERR(priv->fs.inner_ttc))
 		return PTR_ERR(priv->fs.inner_ttc);
 	return 0;
@@ -146,7 +146,7 @@ struct mlx5e_neigh_hash_entry {
 	 */
 	refcount_t refcnt;

-	/* Save the last reported time offloaded trafic pass over one of the
+	/* Save the last reported time offloaded traffic pass over one of the
 	 * neigh hash entry flows. Use it to periodically update the neigh
 	 * 'used' value and avoid neigh deleting by the kernel.
 	 */
@@ -97,7 +97,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
 	[MARK_TO_REG] = mark_to_reg_ct,
 	[LABELS_TO_REG] = labels_to_reg_ct,
 	[FTEID_TO_REG] = fteid_to_reg_ct,
-	/* For NIC rules we store the retore metadata directly
+	/* For NIC rules we store the restore metadata directly
 	 * into reg_b that is passed to SW since we don't
 	 * jump between steering domains.
 	 */
@@ -2448,7 +2448,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
 		}
 	}
-	/* Currenlty supported only for MPLS over UDP */
+	/* Currently supported only for MPLS over UDP */
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
 	    !netif_is_bareudp(filter_dev)) {
 		NL_SET_ERR_MSG_MOD(extack,
@@ -2702,7 +2702,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		if (s_mask && a_mask) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "can't set and add to the same HW field");
-			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
+			netdev_warn(priv->netdev,
+				    "mlx5: can't set and add to the same HW field (%x)\n",
+				    f->field);
 			return -EOPNOTSUPP;
 		}

@@ -2741,8 +2743,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		if (first < next_z && next_z < last) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "rewrite of few sub-fields isn't supported");
-			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
-			       mask);
+			netdev_warn(priv->netdev,
+				    "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
+				    mask);
 			return -EOPNOTSUPP;
 		}

@@ -1492,7 +1492,7 @@ abort:
 /**
  * mlx5_eswitch_enable - Enable eswitch
  * @esw: Pointer to eswitch
- * @num_vfs: Enable eswitch swich for given number of VFs.
+ * @num_vfs: Enable eswitch switch for given number of VFs.
  * Caller must pass num_vfs > 0 when enabling eswitch for
  * vf vports.
  * mlx5_eswitch_enable() returns 0 on success or error code on failure.
@@ -27,7 +27,7 @@ static int pcie_core(struct notifier_block *, unsigned long, void *);
 static int forward_event(struct notifier_block *, unsigned long, void *);

 static struct mlx5_nb events_nbs_ref[] = {
-	/* Events to be proccessed by mlx5_core */
+	/* Events to be processed by mlx5_core */
 	{.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
 	{.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
 	{.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
@@ -1516,7 +1516,7 @@ static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
 	mutex_lock(&fpga_xfrm->lock);

 	if (!fpga_xfrm->sa_ctx)
-		/* Unbounded xfrm, chane only sw attrs */
+		/* Unbounded xfrm, change only sw attrs */
 		goto change_sw_xfrm_attrs;

 	/* copy original hw sa */
@@ -2343,7 +2343,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,

 #define FLOW_TABLE_BIT_SZ 1
 #define GET_FLOW_TABLE_CAP(dev, offset) \
-	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
+	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
 			offset / 32)) >> \
 	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
@@ -2493,7 +2493,7 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

 		/* If this a prio with chains, and we can jump from one chain
-		 * (namepsace) to another, so we accumulate the levels
+		 * (namespace) to another, so we accumulate the levels
 		 */
 		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
 			acc_level = acc_level_ns;
@@ -170,7 +170,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)

 	/* The reset only needs to be issued by one PF. The health buffer is
 	 * shared between all functions, and will be cleared during a reset.
-	 * Check again to avoid a redundant 2nd reset. If the fatal erros was
+	 * Check again to avoid a redundant 2nd reset. If the fatal errors was
 	 * PCI related a reset won't help.
 	 */
 	fatal_error = mlx5_health_check_fatal_sensors(dev);
@@ -213,10 +213,6 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 	mutex_lock(&dev->intf_state_mutex);
 	if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
 		goto unlock;/* a previous error is still being handled */
-	if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
-		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-		goto unlock;
-	}

 	enter_error_state(dev, force);
 unlock:
@@ -749,7 +749,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
 		} else {
 			ptp_event.type = PTP_CLOCK_EXTTS;
 		}
-		/* TODOL clock->ptp can be NULL if ptp_clock_register failes */
+		/* TODOL clock->ptp can be NULL if ptp_clock_register fails */
 		ptp_clock_event(clock->ptp, &ptp_event);
 		break;
 	case PTP_PF_PEROUT:
@@ -40,7 +40,7 @@

 struct mlx5_vxlan {
 	struct mlx5_core_dev *mdev;
-	/* max_num_ports is usuallly 4, 16 buckets is more than enough */
+	/* max_num_ports is usually 4, 16 buckets is more than enough */
 	DECLARE_HASHTABLE(htable, 4);
 	struct mutex sync_lock; /* sync add/del port HW operations */
 };
@@ -389,11 +389,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,

 	switch (cap_mode) {
 	case HCA_CAP_OPMOD_GET_MAX:
-		memcpy(dev->caps.hca_max[cap_type], hca_caps,
+		memcpy(dev->caps.hca[cap_type]->max, hca_caps,
 		       MLX5_UN_SZ_BYTES(hca_cap_union));
 		break;
 	case HCA_CAP_OPMOD_GET_CUR:
-		memcpy(dev->caps.hca_cur[cap_type], hca_caps,
+		memcpy(dev->caps.hca[cap_type]->cur, hca_caps,
 		       MLX5_UN_SZ_BYTES(hca_cap_union));
 		break;
 	default:
@@ -469,7 +469,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
 		return err;

 	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
-	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
+	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
 	       MLX5_ST_SZ_BYTES(odp_cap));

 #define ODP_CAP_SET_MAX(dev, field) \
@@ -514,7 +514,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)

 	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
 				   capability);
-	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
+	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur,
 	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

 	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
@@ -596,7 +596,7 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
 		return 0;

 	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
-	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
+	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur,
 	       MLX5_ST_SZ_BYTES(roce_cap));
 	MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);

@@ -748,14 +748,12 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
 			 const struct pci_device_id *id)
 {
-	struct mlx5_priv *priv = &dev->priv;
 	int err = 0;

 	mutex_init(&dev->pci_status_mutex);
 	pci_set_drvdata(dev->pdev, dev);

 	dev->bar_addr = pci_resource_start(pdev, 0);
-	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));

 	err = mlx5_pci_enable_device(dev);
 	if (err) {
@@ -1249,11 +1247,6 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
 	int err = 0;

 	mutex_lock(&dev->intf_state_mutex);
-	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
-		mlx5_core_warn(dev, "interface is up, NOP\n");
-		goto out;
-	}
-	/* remove any previous indication of internal error */
 	dev->state = MLX5_DEVICE_STATE_UP;

 	err = mlx5_function_setup(dev, true);
@@ -1294,7 +1287,6 @@ function_teardown:
 	mlx5_function_teardown(dev, true);
 err_function:
 	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-out:
 	mutex_unlock(&dev->intf_state_mutex);
 	return err;
 }
@@ -1381,6 +1373,60 @@ out:
 	mutex_unlock(&dev->intf_state_mutex);
 }

+static const int types[] = {
+	MLX5_CAP_GENERAL,
+	MLX5_CAP_GENERAL_2,
+	MLX5_CAP_ETHERNET_OFFLOADS,
+	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
+	MLX5_CAP_ODP,
+	MLX5_CAP_ATOMIC,
+	MLX5_CAP_ROCE,
+	MLX5_CAP_IPOIB_OFFLOADS,
+	MLX5_CAP_FLOW_TABLE,
+	MLX5_CAP_ESWITCH_FLOW_TABLE,
+	MLX5_CAP_ESWITCH,
+	MLX5_CAP_VECTOR_CALC,
+	MLX5_CAP_QOS,
+	MLX5_CAP_DEBUG,
+	MLX5_CAP_DEV_MEM,
+	MLX5_CAP_DEV_EVENT,
+	MLX5_CAP_TLS,
+	MLX5_CAP_VDPA_EMULATION,
+	MLX5_CAP_IPSEC,
+};
+
+static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
+{
+	int type;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(types); i++) {
+		type = types[i];
+		kfree(dev->caps.hca[type]);
+	}
+}
+
+static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
+{
+	struct mlx5_hca_cap *cap;
+	int type;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(types); i++) {
+		cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+		if (!cap)
+			goto err;
+		type = types[i];
+		dev->caps.hca[type] = cap;
+	}
+
+	return 0;
+
+err:
+	mlx5_hca_caps_free(dev);
+	return -ENOMEM;
+}
+
 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -1400,6 +1446,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 	mutex_init(&priv->pgdir_mutex);
 	INIT_LIST_HEAD(&priv->pgdir_list);

+	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
 	priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
 					    mlx5_debugfs_root);
 	INIT_LIST_HEAD(&priv->traps);
@@ -1416,8 +1463,14 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 	if (err)
 		goto err_adev_init;

+	err = mlx5_hca_caps_alloc(dev);
+	if (err)
+		goto err_hca_caps;
+
 	return 0;

+err_hca_caps:
+	mlx5_adev_cleanup(dev);
 err_adev_init:
 	mlx5_pagealloc_cleanup(dev);
 err_pagealloc_init:
@@ -1436,6 +1489,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
 {
 	struct mlx5_priv *priv = &dev->priv;

+	mlx5_hca_caps_free(dev);
 	mlx5_adev_cleanup(dev);
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_health_cleanup(dev);
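The mlx5_mdev_init()/mlx5_mdev_uninit() hunks above slot the new mlx5_hca_caps_alloc()/mlx5_hca_caps_free() calls into the existing goto-unwind chain. A generic sketch of that ordering, in plain C with invented names (not the driver code):

#include <stdio.h>
#include <stdlib.h>

static void *step_a, *step_b, *step_c;

/* Each later init step gets its error label placed above the labels of the
 * earlier steps, so a failure unwinds only what already succeeded, in
 * reverse order - the same reason err_hca_caps: sits above err_adev_init:.
 */
static int init_all(void)
{
	step_a = malloc(16);
	if (!step_a)
		goto err_a;
	step_b = malloc(16);
	if (!step_b)
		goto err_b;
	step_c = malloc(16);	/* the newly added step, like mlx5_hca_caps_alloc() */
	if (!step_c)
		goto err_c;
	return 0;

err_c:
	free(step_b);		/* undo step_b, as err_hca_caps: undoes mlx5_adev_init() */
err_b:
	free(step_a);
err_a:
	return -1;
}

int main(void)
{
	if (init_all() == 0) {
		/* teardown mirrors init in reverse, like mlx5_mdev_uninit() */
		free(step_c);
		free(step_b);
		free(step_a);
	}
	return 0;
}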
@@ -18,7 +18,7 @@

 #define MLX5_SFS_PER_CTRL_IRQ 64
 #define MLX5_IRQ_CTRL_SF_MAX 8
-/* min num of vectores for SFs to be enabled */
+/* min num of vectors for SFs to be enabled */
 #define MLX5_IRQ_VEC_COMP_BASE_SF 2

 #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
@@ -28,13 +28,13 @@
 #define MLX5_EQ_REFS_PER_IRQ (2)

 struct mlx5_irq {
-	u32 index;
 	struct atomic_notifier_head nh;
 	cpumask_var_t mask;
 	char name[MLX5_MAX_IRQ_NAME];
-	struct kref kref;
-	int irqn;
 	struct mlx5_irq_pool *pool;
+	int refcount;
+	u32 index;
+	int irqn;
 };

 struct mlx5_irq_pool {
@@ -138,9 +138,8 @@ out:
 	return ret;
 }

-static void irq_release(struct kref *kref)
+static void irq_release(struct mlx5_irq *irq)
 {
-	struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
 	struct mlx5_irq_pool *pool = irq->pool;

 	xa_erase(&pool->irqs, irq->index);
@@ -159,10 +158,31 @@ static void irq_put(struct mlx5_irq *irq)
 	struct mlx5_irq_pool *pool = irq->pool;

 	mutex_lock(&pool->lock);
-	kref_put(&irq->kref, irq_release);
+	irq->refcount--;
+	if (!irq->refcount)
+		irq_release(irq);
 	mutex_unlock(&pool->lock);
 }

+static int irq_get_locked(struct mlx5_irq *irq)
+{
+	lockdep_assert_held(&irq->pool->lock);
+	if (WARN_ON_ONCE(!irq->refcount))
+		return 0;
+	irq->refcount++;
+	return 1;
+}
+
+static int irq_get(struct mlx5_irq *irq)
+{
+	int err;
+
+	mutex_lock(&irq->pool->lock);
+	err = irq_get_locked(irq);
+	mutex_unlock(&irq->pool->lock);
+	return err;
+}
+
 static irqreturn_t irq_int_handler(int irq, void *nh)
 {
 	atomic_notifier_call_chain(nh, 0, NULL);
@@ -214,7 +234,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 		err = -ENOMEM;
 		goto err_cpumask;
 	}
-	kref_init(&irq->kref);
+	irq->refcount = 1;
 	irq->index = i;
 	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
 	if (err) {
@@ -235,18 +255,18 @@ err_req_irq:

 int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 {
-	int err;
+	int ret;

-	err = kref_get_unless_zero(&irq->kref);
-	if (WARN_ON_ONCE(!err))
+	ret = irq_get(irq);
+	if (!ret)
 		/* Something very bad happens here, we are enabling EQ
 		 * on non-existing IRQ.
 		 */
 		return -ENOENT;
-	err = atomic_notifier_chain_register(&irq->nh, nb);
-	if (err)
+	ret = atomic_notifier_chain_register(&irq->nh, nb);
+	if (ret)
 		irq_put(irq);
-	return err;
+	return ret;
 }

 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
@@ -301,10 +321,9 @@ static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
 	xa_for_each_range(&pool->irqs, index, iter, start, end) {
 		if (!cpumask_equal(iter->mask, affinity))
 			continue;
-		if (kref_read(&iter->kref) < pool->min_threshold)
+		if (iter->refcount < pool->min_threshold)
 			return iter;
-		if (!irq || kref_read(&iter->kref) <
-		    kref_read(&irq->kref))
+		if (!irq || iter->refcount < irq->refcount)
 			irq = iter;
 	}
 	return irq;
@@ -319,7 +338,7 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	mutex_lock(&pool->lock);
 	least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
 	if (least_loaded_irq &&
-	    kref_read(&least_loaded_irq->kref) < pool->min_threshold)
+	    least_loaded_irq->refcount < pool->min_threshold)
 		goto out;
 	new_irq = irq_pool_create_irq(pool, affinity);
 	if (IS_ERR(new_irq)) {
@@ -337,11 +356,11 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	least_loaded_irq = new_irq;
 	goto unlock;
 out:
-	kref_get(&least_loaded_irq->kref);
-	if (kref_read(&least_loaded_irq->kref) > pool->max_threshold)
+	irq_get_locked(least_loaded_irq);
+	if (least_loaded_irq->refcount > pool->max_threshold)
 		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
 			      least_loaded_irq->irqn, pool->name,
-			      kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+			      least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 unlock:
 	mutex_unlock(&pool->lock);
 	return least_loaded_irq;
@@ -357,7 +376,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 	mutex_lock(&pool->lock);
 	irq = xa_load(&pool->irqs, vecidx);
 	if (irq) {
-		kref_get(&irq->kref);
+		irq_get_locked(irq);
 		goto unlock;
 	}
 	irq = irq_request(pool, vecidx);
@@ -424,7 +443,7 @@ out:
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
 		      irq->irqn, cpumask_pr_args(affinity),
-		      kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }

@@ -456,8 +475,12 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
 	struct mlx5_irq *irq;
 	unsigned long index;

+	/* There are cases in which we are destrying the irq_table before
+	 * freeing all the IRQs, fast teardown for example. Hence, free the irqs
+	 * which might not have been freed.
+	 */
 	xa_for_each(&pool->irqs, index, irq)
-		irq_release(&irq->kref);
+		irq_release(irq);
 	xa_destroy(&pool->irqs);
 	kvfree(pool);
 }
@@ -479,7 +502,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
 	if (!mlx5_sf_max_functions(dev))
 		return 0;
 	if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
-		mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
+		mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
 		return 0;
 	}

@@ -597,7 +620,7 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
 		return;

 	/* There are cases where IRQs still will be in used when we reaching
-	 * to here. Hence, making sure all the irqs are realeased.
+	 * to here. Hence, making sure all the irqs are released.
 	 */
 	irq_pools_destroy(table);
 	pci_free_irq_vectors(dev->pdev);
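The pci_irq.c hunks above replace the kref embedded in struct mlx5_irq with a plain integer refcount that is only read and written under the pool mutex. A rough userspace model of that scheme, with invented names (not the driver code):

#include <pthread.h>
#include <stdio.h>

struct fake_irq {
	pthread_mutex_t *pool_lock;
	int refcount;
};

/* caller must hold pool_lock, like irq_get_locked() in the diff */
static int fake_irq_get_locked(struct fake_irq *irq)
{
	if (!irq->refcount)	/* the kernel version warns with WARN_ON_ONCE() */
		return 0;
	irq->refcount++;
	return 1;
}

static void fake_irq_release(struct fake_irq *irq)
{
	printf("last user gone (refcount %d), releasing irq resources\n", irq->refcount);
}

static void fake_irq_put(struct fake_irq *irq)
{
	pthread_mutex_lock(irq->pool_lock);
	irq->refcount--;
	if (!irq->refcount)
		fake_irq_release(irq);
	pthread_mutex_unlock(irq->pool_lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct fake_irq irq = { .pool_lock = &lock, .refcount = 1 };

	pthread_mutex_lock(&lock);
	fake_irq_get_locked(&irq);	/* a second user, e.g. another EQ */
	pthread_mutex_unlock(&lock);

	fake_irq_put(&irq);		/* 2 -> 1, nothing released */
	fake_irq_put(&irq);		/* 1 -> 0, released under the lock */
	return 0;
}

Presumably the point of counting under the pool lock rather than with kref's atomics is that finding an IRQ in the pool and taking a reference on it can happen in one critical section, which is what irq_get_locked() and irq_pool_find_least_loaded() do in the diff.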
@@ -39,7 +39,7 @@ static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, cha
 	struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);
 	struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);

-	return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum);
+	return sysfs_emit(buf, "%u\n", sf_dev->sfnum);
 }
 static DEVICE_ATTR_RO(sfnum);

@@ -476,7 +476,7 @@ static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
 		return;

 	/* Balances with refcount_set; drop the reference so that new user cmd cannot start
-	 * and new vhca event handler cannnot run.
+	 * and new vhca event handler cannot run.
 	 */
 	mlx5_sf_table_put(table);
 	wait_for_completion(&table->disable_complete);
@@ -1038,7 +1038,7 @@ enum {
 struct mlx5_mkey_seg {
 	/* This is a two bit field occupying bits 31-30.
 	 * bit 31 is always 0,
-	 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
+	 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation
 	 */
 	u8 status;
 	u8 pcie_control;
@@ -1157,6 +1157,9 @@ enum mlx5_cap_mode {
 	HCA_CAP_OPMOD_GET_CUR = 1,
 };

+/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
+ * capability memory.
+ */
 enum mlx5_cap_type {
 	MLX5_CAP_GENERAL = 0,
 	MLX5_CAP_ETHERNET_OFFLOADS,
@@ -1213,55 +1216,55 @@ enum mlx5_qcam_feature_groups {

 /* GET Dev Caps macros */
 #define MLX5_CAP_GEN(mdev, cap) \
-	MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

 #define MLX5_CAP_GEN_64(mdev, cap) \
-	MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+	MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

 #define MLX5_CAP_GEN_MAX(mdev, cap) \
-	MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)

 #define MLX5_CAP_GEN_2(mdev, cap) \
-	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
+	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

 #define MLX5_CAP_GEN_2_64(mdev, cap) \
-	MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
+	MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

 #define MLX5_CAP_GEN_2_MAX(mdev, cap) \
-	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap)
+	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)

 #define MLX5_CAP_ETH(mdev, cap) \
 	MLX5_GET(per_protocol_networking_offload_caps,\
-		 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+		 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)

 #define MLX5_CAP_ETH_MAX(mdev, cap) \
 	MLX5_GET(per_protocol_networking_offload_caps,\
-		 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+		 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)

 #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
 	MLX5_GET(per_protocol_networking_offload_caps,\
-		 mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+		 mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)

 #define MLX5_CAP_ROCE(mdev, cap) \
-	MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
+	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)

 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
-	MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
+	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)

 #define MLX5_CAP_ATOMIC(mdev, cap) \
-	MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
+	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)

 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
-	MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
+	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)

 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
-	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+	MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

 #define MLX5_CAP64_FLOWTABLE(mdev, cap) \
-	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
-	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
+	MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)

 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -1301,11 +1304,11 @@ enum mlx5_qcam_feature_groups {

 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
 	MLX5_GET(flow_table_eswitch_cap, \
-		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+		 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
 	MLX5_GET(flow_table_eswitch_cap, \
-		 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+		 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)

 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1327,31 +1330,31 @@ enum mlx5_qcam_feature_groups {

 #define MLX5_CAP_ESW(mdev, cap) \
 	MLX5_GET(e_switch_cap, \
-		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+		 mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)

 #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
 	MLX5_GET64(flow_table_eswitch_cap, \
-		   (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+		   (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

 #define MLX5_CAP_ESW_MAX(mdev, cap) \
 	MLX5_GET(e_switch_cap, \
-		 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
+		 mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)

 #define MLX5_CAP_ODP(mdev, cap)\
-	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)

 #define MLX5_CAP_ODP_MAX(mdev, cap)\
-	MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
+	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)

 #define MLX5_CAP_VECTOR_CALC(mdev, cap) \
 	MLX5_GET(vector_calc_cap, \
-		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
+		 mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)

 #define MLX5_CAP_QOS(mdev, cap)\
-	MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+	MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)

 #define MLX5_CAP_DEBUG(mdev, cap)\
-	MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+	MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)

 #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
 	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
@@ -1387,27 +1390,27 @@ enum mlx5_qcam_feature_groups {
 	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

 #define MLX5_CAP_DEV_MEM(mdev, cap)\
-	MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+	MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

 #define MLX5_CAP64_DEV_MEM(mdev, cap)\
-	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+	MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

 #define MLX5_CAP_TLS(mdev, cap) \
-	MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)
+	MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)

 #define MLX5_CAP_DEV_EVENT(mdev, cap)\
-	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)

 #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
 	MLX5_GET(virtio_emulation_cap, \
-		 (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+		 (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

 #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
 	MLX5_GET64(virtio_emulation_cap, \
-		   (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+		   (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

 #define MLX5_CAP_IPSEC(mdev, cap)\
-	MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)
+	MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)

 enum {
 	MLX5_CMD_STAT_OK = 0x0,
@@ -581,7 +581,7 @@ struct mlx5_priv {
 	/* end: qp staff */

 	/* start: alloc staff */
-	/* protect buffer alocation according to numa node */
+	/* protect buffer allocation according to numa node */
 	struct mutex alloc_mutex;
 	int numa_node;

@@ -623,8 +623,7 @@ struct mlx5_priv {
 };

 enum mlx5_device_state {
-	MLX5_DEVICE_STATE_UNINITIALIZED,
-	MLX5_DEVICE_STATE_UP,
+	MLX5_DEVICE_STATE_UP = 1,
 	MLX5_DEVICE_STATE_INTERNAL_ERROR,
 };

@@ -730,6 +729,11 @@ struct mlx5_profile {
 	} mr_cache[MAX_MR_CACHE_ENTRIES];
 };

+struct mlx5_hca_cap {
+	u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
+	u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
+};
+
 struct mlx5_core_dev {
 	struct device *device;
 	enum mlx5_coredev_type coredev_type;
@@ -741,8 +745,7 @@ struct mlx5_core_dev {
 	char board_id[MLX5_BOARD_ID_LEN];
 	struct mlx5_cmd cmd;
 	struct {
-		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
-		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+		struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
 		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
 		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
 		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
@@ -1111,7 +1114,7 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
 }

 /* Async-atomic event notifier used by mlx5 core to forward FW
- * evetns recived from event queue to mlx5 consumers.
+ * evetns received from event queue to mlx5 consumers.
 * Optimise event queue dipatching.
 */
 int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);