net/mlx5e: Handle offloads flows per peer
Currently, the E-switch offloads table has a single list of all flows that
create a peer_flow over the peer eswitch. In order to support more than one
peer, extend the E-switch offloads table peer_flows to hold an array of
lists, where each peer has a dedicated index via mlx5_get_dev_index().
Thereafter, extend the original flow to hold an array of peers as well.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 0af3613ddc
commit 9be6c21fdc
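To illustrate the data-structure change the message describes, here is a minimal, compilable userspace C sketch of per-peer flow lists indexed by device index. It is a toy model, not the kernel code: the list helpers are simplified stand-ins for the kernel's list API, toy_dev_index() is a hypothetical stand-in for mlx5_get_dev_index(), and the MLX5_MAX_PORTS value is illustrative.

#include <stdio.h>

#define MLX5_MAX_PORTS 4	/* illustrative value; the real constant comes from mlx5 headers */

/* Minimal stand-in for the kernel's doubly linked list. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

/* One peer_flows list per possible peer, as in the patched mlx5_esw_offload. */
struct toy_offloads {
	struct list_head peer_flows[MLX5_MAX_PORTS];
};

/* Each flow carries one list node per peer slot, as in the patched mlx5e_tc_flow. */
struct toy_flow {
	struct list_head peer[MLX5_MAX_PORTS];
};

/* Hypothetical stand-in for mlx5_get_dev_index(): map a device to its slot. */
static int toy_dev_index(int dev_id) { return dev_id; }

int main(void)
{
	struct toy_offloads offloads;
	struct toy_flow flow;
	int self = toy_dev_index(0);
	int peer = toy_dev_index(1);
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		INIT_LIST_HEAD(&offloads.peer_flows[i]);

	/* Duplicating a flow toward one peer links it into that peer's slot only. */
	list_add_tail(&flow.peer[peer], &offloads.peer_flows[peer]);

	/* Teardown mirrors the patch's loop shape: visit every index except our own. */
	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (i == self)
			continue;
		printf("peer index %d: %s\n", i,
		       list_empty(&offloads.peer_flows[i]) ?
		       "no duplicated flows" : "has duplicated flows");
	}
	return 0;
}

The point of the per-index array is that adding or removing the duplicate for one peer touches only that peer's list, leaving flows duplicated toward other peers untouched.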
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -96,7 +96,7 @@ struct mlx5e_tc_flow {
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head hairpin; /* flows sharing the same hairpin */
-	struct list_head peer; /* flows with peer flow */
+	struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
 	struct list_head unready; /* flows not ready to be offloaded (e.g
 				   * due to missing route)
 				   */
@@ -1980,7 +1980,8 @@ void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
 		mlx5e_flow_put(priv, flow);
 }
 
-static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
+static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
+				       int peer_index)
 {
 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
 	struct mlx5e_tc_flow *peer_flow;
@@ -1991,18 +1992,32 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
 		return;
 
 	mutex_lock(&esw->offloads.peer_mutex);
-	list_del(&flow->peer);
+	list_del(&flow->peer[peer_index]);
 	mutex_unlock(&esw->offloads.peer_mutex);
 
-	flow_flag_clear(flow, DUP);
-
 	list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
+		if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+			continue;
 		if (refcount_dec_and_test(&peer_flow->refcnt)) {
 			mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
 			list_del(&peer_flow->peer_flows);
 			kfree(peer_flow);
 		}
 	}
+
+	if (list_empty(&flow->peer_flows))
+		flow_flag_clear(flow, DUP);
 }
 
+static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		if (i == mlx5_get_dev_index(flow->priv->mdev))
+			continue;
+		mlx5e_tc_del_fdb_peer_flow(flow, i);
+	}
+}
+
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
@@ -2017,7 +2032,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
-		mlx5e_tc_del_fdb_peer_flow(flow);
+		mlx5e_tc_del_fdb_peers_flow(flow);
 		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
@@ -4403,6 +4418,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
+	int i = mlx5_get_dev_index(peer_esw->dev);
 	struct mlx5e_rep_priv *peer_urpriv;
 	struct mlx5e_tc_flow *peer_flow;
 	struct mlx5_core_dev *in_mdev;
@@ -4435,7 +4451,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
 	list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
 	flow_flag_set(flow, DUP);
 	mutex_lock(&esw->offloads.peer_mutex);
-	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
+	list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
 	mutex_unlock(&esw->offloads.peer_mutex);
 
 out:
@@ -5288,9 +5304,14 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
 {
 	struct mlx5e_tc_flow *flow, *tmp;
+	int i;
 
-	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
-		mlx5e_tc_del_fdb_peer_flow(flow);
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		if (i == mlx5_get_dev_index(esw->dev))
+			continue;
+		list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
+			mlx5e_tc_del_fdb_peers_flow(flow);
+	}
 }
 
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -249,7 +249,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_group *vport_rx_drop_group;
 	struct mlx5_flow_handle *vport_rx_drop_rule;
 	struct xarray vport_reps;
-	struct list_head peer_flows;
+	struct list_head peer_flows[MLX5_MAX_PORTS];
 	struct mutex peer_mutex;
 	struct mutex encap_tbl_lock; /* protects encap_tbl */
 	DECLARE_HASHTABLE(encap_tbl, 8);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2825,8 +2825,10 @@ err_out:
 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+	int i;
 
-	INIT_LIST_HEAD(&esw->offloads.peer_flows);
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
 	mutex_init(&esw->offloads.peer_mutex);
 
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))