net/mlx5e: CT: Use own workqueue instead of mlx5e priv
Allocate a ct priv workqueue instead of using the mlx5e priv one, so that
flushing only waits for the related CT entries. Also move the workqueue
flush to before the rhashtable destroy, otherwise the entries won't be
valid.

Fixes: b069e14fff46 ("net/mlx5e: CT: Fix queued up restore put() executing after relevant ft release")
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
commit 6c4e8fa03f
parent 1afbd1e283
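For context, the lifecycle the patch moves to is sketched below using the generic Linux workqueue API: a driver-private ordered workqueue that is allocated at init, flushed before the structures its work items reference are torn down, and destroyed at cleanup. This is a minimal illustrative sketch, not the mlx5 code itself; all names (my_ct, my_ct_entry_del_work, and so on) are hypothetical.

/*
 * Minimal sketch of a driver-private workqueue lifecycle.
 * Not the mlx5 implementation; names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ct {
	struct workqueue_struct *wq;	/* private queue, not a netdev-wide one */
};

struct my_ct_entry {
	struct my_ct *ct;
	struct work_struct work;
};

static void my_ct_entry_del_work(struct work_struct *work)
{
	struct my_ct_entry *entry = container_of(work, struct my_ct_entry, work);

	/* release per-entry state here, then free the entry */
	kfree(entry);
}

static int my_ct_init(struct my_ct *ct)
{
	/* ordered queue: deletions run one at a time, in submission order */
	ct->wq = alloc_ordered_workqueue("my_ct_wq", 0);
	return ct->wq ? 0 : -ENOMEM;
}

static void my_ct_entry_put(struct my_ct_entry *entry)
{
	/* defer deletion to the private queue owned by this module */
	INIT_WORK(&entry->work, my_ct_entry_del_work);
	queue_work(entry->ct->wq, &entry->work);
}

static void my_ct_clean(struct my_ct *ct)
{
	/*
	 * Flush before destroying anything the queued work dereferences,
	 * so no deferred deletion runs against already-freed tables.
	 */
	flush_workqueue(ct->wq);
	/* ... tear down hash tables / flow tables here ... */
	destroy_workqueue(ct->wq);
}

Because the queue belongs to the CT module, flushing it only waits for CT deletion work, unlike flushing the wider driver workqueue.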
@@ -76,6 +76,7 @@ struct mlx5_tc_ct_priv {
 	struct mlx5_ct_fs *fs;
 	struct mlx5_ct_fs_ops *fs_ops;
 	spinlock_t ht_lock; /* protects ft entries */
+	struct workqueue_struct *wq;
 
 	struct mlx5_tc_ct_debugfs debugfs;
 };
@@ -941,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
 static void
 __mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
 {
-	struct mlx5e_priv *priv;
-
 	if (!refcount_dec_and_test(&entry->refcnt))
 		return;
 
-	priv = netdev_priv(entry->ct_priv->netdev);
 	INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
-	queue_work(priv->wq, &entry->work);
+	queue_work(entry->ct_priv->wq, &entry->work);
 }
 
 static struct mlx5_ct_counter *
@@ -1759,19 +1757,16 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 static void
 mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 {
-	struct mlx5e_priv *priv;
-
 	if (!refcount_dec_and_test(&ft->refcount))
 		return;
 
+	flush_workqueue(ct_priv->wq);
 	nf_flow_table_offload_del_cb(ft->nf_ft,
 				     mlx5_tc_ct_block_flow_offload, ft);
 	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
 	rhashtable_free_and_destroy(&ft->ct_entries_ht,
 				    mlx5_tc_ct_flush_ft_entry,
 				    ct_priv);
-	priv = netdev_priv(ct_priv->netdev);
-	flush_workqueue(priv->wq);
 	mlx5_tc_ct_free_pre_ct_tables(ft);
 	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
 	kfree(ft);
@@ -2176,6 +2171,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 	if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
 		goto err_ct_tuples_nat_ht;
 
+	ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
+	if (!ct_priv->wq) {
+		err = -ENOMEM;
+		goto err_wq;
+	}
+
 	err = mlx5_tc_ct_fs_init(ct_priv);
 	if (err)
 		goto err_init_fs;
@@ -2184,6 +2185,8 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 	return ct_priv;
 
 err_init_fs:
+	destroy_workqueue(ct_priv->wq);
+err_wq:
 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
 err_ct_tuples_nat_ht:
 	rhashtable_destroy(&ct_priv->ct_tuples_ht);
@@ -2213,6 +2216,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
 	if (!ct_priv)
 		return;
 
+	destroy_workqueue(ct_priv->wq);
 	mlx5_ct_tc_remove_dbgfs(ct_priv);
 	chains = ct_priv->chains;
 