net/mlx5: Configure IPsec steering for egress RoCEv2 traffic

Add a steering table/rule in the RDMA_TX domain to forward all traffic
to the IPsec crypto table in the NIC domain.

Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit 22551e77e5
parent 899577600b
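In short, the egress wiring added by this patch is a single-entry flow table in the new RDMA_TX IPsec namespace whose one catch-all rule forwards every packet to the IPsec policy table that already exists in the NIC TX domain. The sketch below is illustrative only: it condenses the mlx5_ipsec_fs_roce_tx_create() and ipsec_fs_roce_tx_rule_setup() hunks further down into a single function with direct cleanup; roce_tx_steering_sketch() is an invented name, and a real caller (as the patch does) would keep the table, group and rule handles around for later teardown instead of dropping them.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>

/* Sketch: hook the RDMA_TX IPsec domain into the NIC-domain policy table. */
static int roce_tx_steering_sketch(struct mlx5_core_dev *mdev,
                                   struct mlx5_flow_table *pol_ft)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_destination dst = {};
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_table *ft;
        struct mlx5_flow_group *fg;
        u32 *in;
        int err;

        ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
        if (!ns)
                return -EOPNOTSUPP;

        in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        /* One entry is enough: every egress RoCEv2 packet takes the same path. */
        ft_attr.max_fte = 1;
        ft = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto out_free;
        }

        /* A single flow group spanning the only entry in the table. */
        MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, in, end_flow_index, 0);
        fg = mlx5_create_flow_group(ft, in);
        if (IS_ERR(fg)) {
                err = PTR_ERR(fg);
                goto out_ft;
        }

        /* Catch-all rule (NULL spec): forward everything to the NIC-domain
         * IPsec policy table.  TABLE_TYPE is the destination form the patch
         * uses to point at a table that lives in another steering domain.
         */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
        dst.ft = pol_ft;
        rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dst, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                goto out_fg;
        }

        kvfree(in);
        return 0;

out_fg:
        mlx5_destroy_flow_group(fg);
out_ft:
        mlx5_destroy_flow_table(ft);
out_free:
        kvfree(in);
        return err;
}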
@@ -334,7 +334,8 @@ out:
 }
 
 /* IPsec TX flow steering */
-static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+                     struct mlx5_ipsec_fs *roce)
 {
         struct mlx5_flow_destination dest = {};
         struct mlx5_flow_table *ft;
@@ -357,8 +358,15 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
         err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
         if (err)
                 goto err_pol_miss;
+
+        err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
+        if (err)
+                goto err_roce;
         return 0;
 
+err_roce:
+        mlx5_del_flow_rules(tx->pol.rule);
+        mlx5_destroy_flow_group(tx->pol.group);
 err_pol_miss:
         mlx5_destroy_flow_table(tx->ft.pol);
 err_pol_ft:
@@ -376,9 +384,10 @@ static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
         if (tx->ft.refcnt)
                 goto skip;
 
-        err = tx_create(mdev, tx);
+        err = tx_create(mdev, tx, ipsec->roce);
         if (err)
                 goto out;
+
 skip:
         tx->ft.refcnt++;
 out:
@@ -397,6 +406,7 @@ static void tx_ft_put(struct mlx5e_ipsec *ipsec)
         if (tx->ft.refcnt)
                 goto out;
 
+        mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce);
         mlx5_del_flow_rules(tx->pol.rule);
         mlx5_destroy_flow_group(tx->pol.group);
         mlx5_destroy_flow_table(tx->ft.pol);
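One detail the tx_ft_put() hunk above pins down is teardown order: the new RDMA_TX rule forwards into the NIC-domain policy table, so the RoCE side has to be unhooked before that table is destroyed. Below is a minimal mirror of that ordering, written as a hypothetical helper that would live in the same file as tx_ft_put(), where struct mlx5e_ipsec and struct mlx5e_ipsec_tx are visible; the helper name is invented.

/* Hypothetical helper mirroring the tx_ft_put() hunk above: remove the
 * RDMA_TX forwarding state first, then the NIC-domain policy table that
 * the RDMA_TX rule pointed at. */
static void ipsec_tx_roce_then_pol_teardown(struct mlx5e_ipsec *ipsec,
                                            struct mlx5e_ipsec_tx *tx)
{
        mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce);     /* RDMA_TX rule/group/table */
        mlx5_del_flow_rules(tx->pol.rule);
        mlx5_destroy_flow_group(tx->pol.group);
        mlx5_destroy_flow_table(tx->ft.pol);            /* destination of the RDMA_TX rule */
}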
@@ -43,7 +43,8 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
                 caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
 
         if (mlx5_get_roce_state(mdev) &&
-            (MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA))
+            MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
+            MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
                 caps |= MLX5_IPSEC_CAP_ROCE;
 
         if (!caps)
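With this change MLX5_IPSEC_CAP_ROCE is only reported when the device supports both cross-domain hops: NIC_RX into RDMA_RX and RDMA_TX back into NIC_TX. A caller is expected to gate the RoCE steering setup on that bit; the wrapper below is only an illustration (its name and the include paths are assumptions), while mlx5_ipsec_device_caps(), MLX5_IPSEC_CAP_ROCE and mlx5_ipsec_fs_roce_init() are the symbols touched by this series.

#include "en_accel/ipsec.h"     /* mlx5_ipsec_device_caps(), MLX5_IPSEC_CAP_ROCE (assumed path) */
#include "lib/ipsec_fs_roce.h"  /* mlx5_ipsec_fs_roce_init() (assumed path) */

/* Illustrative gate: set up RoCE IPsec steering only when the device can
 * forward NIC_RX->RDMA_RX and RDMA_TX->NIC_TX. */
static struct mlx5_ipsec_fs *ipsec_roce_init_if_supported(struct mlx5_core_dev *mdev)
{
        if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE))
                return NULL;

        return mlx5_ipsec_fs_roce_init(mdev);
}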
@@ -20,9 +20,17 @@ struct mlx5_ipsec_rx_roce {
         struct mlx5_flow_namespace *ns_rdma;
 };
 
+struct mlx5_ipsec_tx_roce {
+        struct mlx5_flow_group *g;
+        struct mlx5_flow_table *ft;
+        struct mlx5_flow_handle *rule;
+        struct mlx5_flow_namespace *ns;
+};
+
 struct mlx5_ipsec_fs {
         struct mlx5_ipsec_rx_roce ipv4_rx;
         struct mlx5_ipsec_rx_roce ipv6_rx;
+        struct mlx5_ipsec_tx_roce tx;
 };
 
 static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
@@ -86,6 +94,105 @@ fail_add_rule:
         return err;
 }
 
+static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev,
+                                       struct mlx5_ipsec_tx_roce *roce,
+                                       struct mlx5_flow_table *pol_ft)
+{
+        struct mlx5_flow_destination dst = {};
+        MLX5_DECLARE_FLOW_ACT(flow_act);
+        struct mlx5_flow_handle *rule;
+        int err = 0;
+
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+        dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+        dst.ft = pol_ft;
+        rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, &dst,
+                                   1);
+        if (IS_ERR(rule)) {
+                err = PTR_ERR(rule);
+                mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
+                              err);
+                goto out;
+        }
+        roce->rule = rule;
+
+out:
+        return err;
+}
+
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce)
+{
+        struct mlx5_ipsec_tx_roce *tx_roce;
+
+        if (!ipsec_roce)
+                return;
+
+        tx_roce = &ipsec_roce->tx;
+
+        mlx5_del_flow_rules(tx_roce->rule);
+        mlx5_destroy_flow_group(tx_roce->g);
+        mlx5_destroy_flow_table(tx_roce->ft);
+}
+
+#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
+                                 struct mlx5_ipsec_fs *ipsec_roce,
+                                 struct mlx5_flow_table *pol_ft)
+{
+        struct mlx5_flow_table_attr ft_attr = {};
+        struct mlx5_ipsec_tx_roce *roce;
+        struct mlx5_flow_table *ft;
+        struct mlx5_flow_group *g;
+        int ix = 0;
+        int err;
+        u32 *in;
+
+        if (!ipsec_roce)
+                return 0;
+
+        roce = &ipsec_roce->tx;
+
+        in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+        if (!in)
+                return -ENOMEM;
+
+        ft_attr.max_fte = 1;
+        ft = mlx5_create_flow_table(roce->ns, &ft_attr);
+        if (IS_ERR(ft)) {
+                err = PTR_ERR(ft);
+                mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
+                return err;
+        }
+
+        roce->ft = ft;
+
+        MLX5_SET_CFG(in, start_flow_index, ix);
+        ix += MLX5_TX_ROCE_GROUP_SIZE;
+        MLX5_SET_CFG(in, end_flow_index, ix - 1);
+        g = mlx5_create_flow_group(ft, in);
+        if (IS_ERR(g)) {
+                err = PTR_ERR(g);
+                mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
+                goto fail;
+        }
+        roce->g = g;
+
+        err = ipsec_fs_roce_tx_rule_setup(mdev, roce, pol_ft);
+        if (err) {
+                mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
+                goto rule_fail;
+        }
+
+        return 0;
+
+rule_fail:
+        mlx5_destroy_flow_group(roce->g);
+fail:
+        mlx5_destroy_flow_table(ft);
+        return err;
+}
+
 struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
 {
         struct mlx5_ipsec_rx_roce *rx_roce;
@@ -245,5 +352,17 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
         roce_ipsec->ipv4_rx.ns_rdma = ns;
         roce_ipsec->ipv6_rx.ns_rdma = ns;
 
+        ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
+        if (!ns) {
+                mlx5_core_err(mdev, "Failed to get RoCE tx ns\n");
+                goto err_tx;
+        }
+
+        roce_ipsec->tx.ns = ns;
+
         return roce_ipsec;
+
+err_tx:
+        kfree(roce_ipsec);
+        return NULL;
 }
@@ -15,6 +15,10 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
                                  struct mlx5_flow_namespace *ns,
                                  struct mlx5_flow_destination *default_dst,
                                  u32 family, u32 level, u32 prio);
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce);
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
+                                 struct mlx5_ipsec_fs *ipsec_roce,
+                                 struct mlx5_flow_table *pol_ft);
 void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce);
 struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev);
 