net/mlx5: Add IRQ vector to CPU lookup function
Currently, once driver load completes, IRQ requests have been performed for all vectors. However, as we move to support dynamic creation of EQs, this will no longer be the case, since some IRQs will not exist at that stage. In such a case, fall back to the default CPU-to-IRQ mapping, which is the serial mapping based on the IRQ vector index; that is, the n'th vector is mapped to the n'th CPU.

Introduce an API function, mlx5_comp_vector_get_cpu(), which takes an IRQ vector index and returns the corresponding CPU. It uses the existing IRQ affinity if one is defined, and resorts to the default serialized CPU mapping otherwise.

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
This commit is contained in: commit f3147015fa (parent ddd2c79da0)
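For readers outside the mlx5 tree, the lookup the new helper performs can be illustrated with a small standalone sketch. This is not the in-kernel implementation; the names below (vector_get_cpu, irq_affinity_first_cpu, NUM_ONLINE_CPUS) are illustrative stand-ins. It only demonstrates the behavior described above: prefer the first CPU of the vector's IRQ affinity mask when the IRQ exists, otherwise fall back to the serial mapping where the n'th vector lands on the n'th CPU.

/*
 * Minimal userspace sketch of the lookup described above -- not the
 * in-kernel implementation. All names here are illustrative stand-ins
 * for the mlx5 symbols.
 */
#include <stdio.h>

#define NUM_ONLINE_CPUS 8	/* stand-in for the set of online CPUs */

/* Pretend per-vector affinity: -1 means the IRQ was not requested yet. */
static const int irq_affinity_first_cpu[] = { 2, 3, -1, -1 };

/* Default serial mapping: the n'th vector lands on the n'th CPU. */
static int default_spread(int vector)
{
	return vector % NUM_ONLINE_CPUS;
}

static int vector_get_cpu(int vector)
{
	int cpu = irq_affinity_first_cpu[vector];

	return cpu >= 0 ? cpu : default_spread(vector);
}

int main(void)
{
	int v;

	for (v = 0; v < 4; v++)
		printf("vector %d -> CPU %d\n", v, vector_get_cpu(v));
	return 0;
}

In the actual patch, as the diff below shows, the fallback path is mlx5_cpumask_default_spread(), which is additionally passed the device's NUMA node (dev->priv.numa_node) when picking the default CPU.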
@@ -128,7 +128,7 @@ static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
 
 static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
 {
-	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+	int cpu = mlx5_comp_vector_get_cpu(priv->mdev, 0);
 	struct net_device *netdev = priv->netdev;
 	struct mlx5e_trap *t;
 	int err;
@@ -2445,7 +2445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct xsk_buff_pool *xsk_pool,
 			      struct mlx5e_channel **cp)
 {
-	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
+	int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
 	struct net_device *netdev = priv->netdev;
 	struct mlx5e_xsk_param xsk;
 	struct mlx5e_channel *c;
@@ -2862,7 +2862,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 		cpumask_clear(priv->scratchpad.cpumask);
 
 		for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
-			int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+			int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
 
 			cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
 		}
@@ -1058,7 +1058,7 @@ unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_comp_vectors_count);
 
-struct cpumask *
+static struct cpumask *
 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -1068,10 +1068,23 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 	if (eq)
 		return mlx5_irq_get_affinity_mask(eq->core.irq);
 
-	WARN_ON_ONCE(1);
 	return NULL;
 }
-EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
+{
+	struct cpumask *mask;
+	int cpu;
+
+	mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
+	if (mask)
+		cpu = cpumask_first(mask);
+	else
+		cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+
+	return cpu;
+}
+EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
 
 #ifdef CONFIG_RFS_ACCEL
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
@@ -1109,8 +1109,7 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
 void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
 
 unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
-struct cpumask *
-mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
 unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
 int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
 			   u8 roce_version, u8 roce_l3_type, const u8 *gid,