Merge tag 'mlx5-updates-2021-06-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-06-09

Introduce steering header insert/remove and switchdev bridge offloads

1) From Yevgeny, Steering header insert/remove support

ConnectX supports offloading of various encapsulations and decapsulations
(e.g. VXLAN), which are performed by the 'Packet Reformat' action.
Starting with ConnectX-6 DX, a new reformat type is supported -
INSERT_HEADER. This reformat allows inserting an arbitrary-size buffer at
a selected location in the packet on RX flows.

The insert/remove header support is needed as a prerequisite for the
bridge offloads vlan pop/push support, see below.
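
As a rough illustration of the new steering API (not taken verbatim from
the series), reformat actions are now allocated through struct
mlx5_pkt_reformat_params, whose type/size/data/param_0/param_1 fields
appear in the fs_cmd.c hunk below. The MLX5_REFORMAT_TYPE_INSERT_HDR name
and the anchor/offset meaning of the two params are assumptions based on
this cover letter:

  /* Everything except mlx5_packet_reformat_alloc() and the params fields
   * is an illustrative name.
   */
  static struct mlx5_pkt_reformat *
  insert_hdr_reformat_alloc(struct mlx5_core_dev *mdev, void *hdr,
                            size_t hdr_size, u8 anchor, u8 offset)
  {
          struct mlx5_pkt_reformat_params params = {};

          params.type = MLX5_REFORMAT_TYPE_INSERT_HDR; /* assumed enum name */
          params.param_0 = anchor;  /* insertion anchor in the packet */
          params.param_1 = offset;  /* offset from that anchor */
          params.size = hdr_size;   /* size of the buffer to insert */
          params.data = hdr;        /* the header bytes to insert */

          return mlx5_packet_reformat_alloc(mdev, &params,
                                            MLX5_FLOW_NAMESPACE_FDB);
  }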

2) From Vlad, Support for bridge offloads for switchdev mode

This change implements bridge offloads with VLAN support, working on top
of mlx5 representors in switchdev mode.

HIGH-LEVEL OVERVIEW

Hardware supported by the mlx5 driver doesn't provide dynamic learning or
aging functionality and requires the driver to emulate all switch-like
behavior in software. As such, all packets by default go through the miss
path, appear on the representor and reach the software bridge, if it is
the upper device of the representor. This causes the bridge to process the
packet in software, learn the MAC address into its FDB and send a
SWITCHDEV_FDB_ADD_TO_DEVICE event to all subscribers. Upon reception of
the SWITCHDEV_FDB_ADD_TO_DEVICE notification, mlx5 offloads the FDB entry
to hardware and sends back a SWITCHDEV_FDB_ADD_TO_BRIDGE notification to
prevent such entries from being aged out by the kernel bridge. Leaving
aging to the kernel bridge would result in deletion of offloaded dynamic
FDB entries every aging_time period, because packets are processed by
hardware and, consequently, the 'used' timestamp of the FDB entry is never
updated. Hardware aging is therefore emulated in the driver by a periodic
workqueue task that manually updates the rules according to their hardware
counters (a simplified sketch of this loop follows the list below):

- If the hardware counter has changed since the last update, the handler
refreshes the 'used' timestamp of the kernel bridge dynamic entry by
sending a SWITCHDEV_FDB_ADD_TO_BRIDGE notification for the entry.

- If the FDB entry wasn't updated for the user-controllable aging_time
period, the FDB entry is unoffloaded from hardware and a corresponding
SWITCHDEV_FDB_DEL_TO_BRIDGE notification is sent to the kernel bridge.
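
A minimal sketch of that periodic handler, under stated assumptions: the
mlx5_esw_bridge fields (fdb_list, ageing_time) and the helpers
mlx5_esw_bridge_fdb_offload_notify()/mlx5_esw_bridge_fdb_entry_cleanup()
are illustrative names, since the diff of the bridge implementation is
suppressed in this view; the FDB entry fields come from bridge_priv.h
below, and mlx5_fc_query_cached() is the existing mlx5 flow-counter API.

  static void mlx5_esw_bridge_fdb_update(struct mlx5_esw_bridge *bridge)
  {
          struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

          list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
                  u64 bytes, packets, lastuse;

                  /* Read the cached hardware counter of the ingress rule. */
                  mlx5_fc_query_cached(entry->ingress_counter,
                                       &bytes, &packets, &lastuse);
                  if (lastuse != entry->lastuse) {
                          /* Hardware saw traffic: refresh 'used' so the
                           * kernel bridge does not age the entry out.
                           */
                          entry->lastuse = lastuse;
                          mlx5_esw_bridge_fdb_offload_notify(entry->dev,
                                                             entry->key.addr,
                                                             entry->key.vid,
                                                             SWITCHDEV_FDB_ADD_TO_BRIDGE);
                  } else if (time_after(jiffies, entry->lastuse +
                                        bridge->ageing_time)) {
                          /* No traffic for ageing_time: unoffload the entry
                           * and tell the kernel bridge to delete it.
                           */
                          mlx5_esw_bridge_fdb_offload_notify(entry->dev,
                                                             entry->key.addr,
                                                             entry->key.vid,
                                                             SWITCHDEV_FDB_DEL_TO_BRIDGE);
                          /* Assumed teardown helper. */
                          mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
                  }
          }
  }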

The mlx5 bridge offload implementation fully supports port VLAN objects,
including PVID (vlan push) and "Egress Untagged" (vlan pop).

SOFTWARE ARCHITECTURE

Struct mlx5_eswitch is extended with a pointer to the new
mlx5_esw_bridge_offloads structure, which holds a linked list of
mlx5_esw_bridge objects. Struct mlx5_esw_bridge is the main switch object
in mlx5 that holds all data for offloaded FDB entries and metadata for
bridge ports and their vlans. The mlx5_esw_bridge object is created when
the first representor of an eswitch vport is added to a bridge and deleted
when the last representor is detached from it. Bridge FDB entries are
saved both in a linked list (to iterate over all FDB entries in the aging
workqueue task) and in a hashtable for quick lookup by MAC+VLAN tuple.
Port metadata is stored in struct mlx5_esw_bridge_port, which is kept in
an xarray to allow quick lookup by vport number. Part of the port metadata
is the set of port vlans, represented by the mlx5_esw_bridge_vlan
structure. The vlan structure points to all FDB entries on the vlan/port
via its fdb_list linked list.
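
A minimal sketch of the two lookups described above, assuming an
rhashtable keyed on struct mlx5_esw_bridge_fdb_key and an xarray of ports
indexed by vport number (the fdb_ht_params object and the mlx5_esw_bridge
field names are illustrative; the FDB key/entry types are the ones added
in bridge_priv.h further below):

  static const struct rhashtable_params fdb_ht_params = {
          .key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
          .key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
          .head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
  };

  /* Quick FDB lookup by MAC+VLAN tuple. */
  static struct mlx5_esw_bridge_fdb_entry *
  bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
                    const unsigned char *addr, u16 vid)
  {
          struct mlx5_esw_bridge_fdb_key key = { .vid = vid };

          ether_addr_copy(key.addr, addr);
          return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
  }

  /* Quick port lookup by vport number. */
  static struct mlx5_esw_bridge_port *
  bridge_port_lookup(struct mlx5_esw_bridge *bridge, u16 vport_num)
  {
          return xa_load(&bridge->vports, vport_num);
  }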

Simplified diagram of mlx5 bridge objects:

                      +------------------+
                      |  mxl5_eswitch    |
                      |                  |
                      |  br_offloads     |
                      +--------+---------+
                               |
                      +--------v-------------------+
                      |  mlx5_esw_bridge_offloads  |
                      |                            |
                   +-->  bridges                   |
                   |  +-------+--------------------+
                   |          |
                   |          |
                   |      +---v---------------+
                   |      | mlx5_esw_bridge   |
                   |      |                   |
                   |      | vports            |
                   |      |                   |
                   |      | fdb_ht            |
                   |      +---+---------------+
                   |          |
                   |      +---v---------------+
                   +------+ mlx5_esw_bridge   |
                          |                   |
+-------------------------+ vports            |
|                         |                   |
|                         | fdb_ht            +------------------------------------------+
|                         +-------------------+                                          |
|                                                                                        |
|                                                                                        |
| +----------------------+                                 +---------------------------+ |
+-> mlx5_esw_bridge_port |                              +--> mlx5_esw_bridge_fdb_entry <-+
| |                      |    +----------------------+  |  +--+------------------------+ |
| | vlans                +--+-> mlx5_esw_bridge_vlan |  |     |                          |
| |                      |  | |                      |  |  +--v------------------------+ |
| +----------------------+  | | fdb_list             +--+  | mlx5_esw_bridge_fdb_entry <-+
|                           | +-------^--------------+     +--+------------------------+ |
| +----------------------+  |         |                       |                          |
+-> mlx5_esw_bridge_port |  |         +-----------------------+                          |
  |                      |  |                                                            |
  | vlans                |  | -----------------------+                                   |
  |                      |  +-> mlx5_esw_bridge_vlan |                                   |
  +----------------------+    |                      |     +---------------------------+ |
                              | fdb_list             +-----> mlx5_esw_bridge_fdb_entry <-+
                              +-------^--------------+     +--+------------------------+
                                      |                       |
                                      +-----------------------+

HARDWARE REPRESENTATION

In order to adhere to the kernel software datapath model, bridge offloads
must come after TC and NF FDBs. However, since netfilter offload in mlx5
is implemented with unmanaged tables, its miss path is not automatically
connected to the next priority and requires the code to manually connect
it to the slow table. To keep bridge offloads encapsulated and not mixed
with eswitch offloads, a new FDB_TC_MISS priority is created between
FDB_FT_OFFLOAD and FDB_SLOW_PATH. This allows bridge offloads to be
created without exposing their internal tables to any other modules, since
the miss path of the managed TC-miss table is automatically wired to the
next priority.

The bridge tables are created with the new priority FDB_BR_OFFLOAD in the
FDB namespace, between the TC-miss and slow path priorities. The priority
consists of two levels: an ingress table, global per eswitch, that matches
incoming packets by src_mac/vid and redirects them to the next level (an
egress table), which is chosen according to the ingress port's bridge
membership and matches on dst_mac/vid in order to redirect the packet to a
vport, according to the following diagram (a sketch of table creation in
the new priority follows it):

                +
                |
      +---------v----------+
      |                    |
      |   FDB_TC_OFFLOAD   |
      |                    |
      +---------+----------+
                |
                |
      +---------v----------+
      |                    |
      |   FDB_FT_OFFLOAD   |
      |                    |
      +---------+----------+
                |
                |
      +---------v----------+
      |                    |
      |    FDB_TC_MISS     |
      |                    |
      +---------+----------+
                |
+--------------------------------------+
|               |                      |
|        +------+                      |
|        |                             |
| +------v--------+   FDB_BR_OFFLOAD   |
| | INGRESS_TABLE |                    |
| +------+---+----+                    |
|        |   |      match              |
|        |   +---------+               |
|        |             |               |    +-------+
|        |     +-------v-------+ match |    |       |
|        |     | EGRESS_TABLE  +------------> vport |
|        |     +-------+-------+       |    |       |
|        |             |               |    +-------+
|        |    miss     |               |
|        +------+------+               |
|               |                      |
+--------------------------------------+
                |
                |
      +---------v----------+
      |                    |
      |   FDB_SLOW_PATH    |
      |                    |
      +---------+----------+
                |
                v
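
A minimal sketch of placing a flow table in the new FDB_BR_OFFLOAD
priority, using the standard mlx5 flow steering API; the table size and
level below are illustrative assumptions, since the bridge implementation
itself is in the large diff suppressed further down:

  static struct mlx5_flow_table *
  bridge_ingress_table_create(struct mlx5_eswitch *esw)
  {
          struct mlx5_flow_table_attr ft_attr = {};
          struct mlx5_flow_namespace *ns;

          ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
          if (!ns)
                  return ERR_PTR(-EOPNOTSUPP);

          ft_attr.prio = FDB_BR_OFFLOAD;  /* new priority added by this series */
          ft_attr.level = 0;              /* ingress level of the priority */
          ft_attr.max_fte = 4096;         /* illustrative size */

          return mlx5_create_flow_table(ns, &ft_attr);
  }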

PATCHES OVERVIEW

1-3 - Miscellaneous refactorings and infrastructure changes.

4 - Mlx5 bridge offload infrastructure and dedicated fs_core
namespace/tables implementation.

5 - FDB entry offload.

6 - Dynamic FDB entry aging.

7-10 - VLAN filtering offload.

11 - Tracepoints for main mlx5 bridge offload events (FDB entry
offload/unoffload, VLAN add/delete, etc.)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

David S. Miller 2021-06-10 13:36:37 -07:00
commit 2027e13f62
31 changed files with 2523 additions and 131 deletions


@ -12,6 +12,7 @@ Contents
- `Enabling the driver and kconfig options`_
- `Devlink info`_
- `Devlink parameters`_
- `Bridge offload`_
- `mlx5 subfunction`_
- `mlx5 function attributes`_
- `Devlink health reporters`_
@ -217,6 +218,37 @@ users try to enable them.
$ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
Bridge offload
==============
The mlx5 driver implements support for offloading bridge rules when in switchdev
mode. Linux bridge FDBs are automatically offloaded when mlx5 switchdev
representor is attached to bridge.
- Change device to switchdev mode::
$ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
- Attach mlx5 switchdev representor 'enp8s0f0' to bridge netdev 'bridge1'::
$ ip link set enp8s0f0 master bridge1
VLANs
-----
Following bridge VLAN functions are supported by mlx5:
- VLAN filtering (including multiple VLANs per port)::
$ ip link set bridge1 type bridge vlan_filtering 1
$ bridge vlan add dev enp8s0f0 vid 2-3
- VLAN push on bridge ingress::
$ bridge vlan add dev enp8s0f0 vid 3 pvid
- VLAN pop on bridge egress::
$ bridge vlan add dev enp8s0f0 vid 3 untagged
mlx5 subfunction
================
mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.
@ -568,3 +600,59 @@ tc and eswitch offloads tracepoints:
$ cat /sys/kernel/debug/tracing/trace
...
kworker/u48:7-2221 [009] ...1 1475.387435: mlx5e_rep_neigh_update: netdev: ens1f0 MAC: 24:8a:07:9a:17:9a IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_connected=1
Bridge offloads tracepoints:
- mlx5_esw_bridge_fdb_entry_init: trace bridge FDB entry offloaded to mlx5::
$ echo mlx5:mlx5_esw_bridge_fdb_entry_init >> set_event
$ cat /sys/kernel/debug/tracing/trace
...
kworker/u20:9-2217 [003] ...1 318.582243: mlx5_esw_bridge_fdb_entry_init: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=0 flags=0 used=0
- mlx5_esw_bridge_fdb_entry_cleanup: trace bridge FDB entry deleted from mlx5::
$ echo mlx5:mlx5_esw_bridge_fdb_entry_cleanup >> set_event
$ cat /sys/kernel/debug/tracing/trace
...
ip-2581 [005] ...1 318.629871: mlx5_esw_bridge_fdb_entry_cleanup: net_device=enp8s0f0_1 addr=e4:fd:05:08:00:03 vid=0 flags=0 used=16
- mlx5_esw_bridge_fdb_entry_refresh: trace bridge FDB entry offload refreshed in
mlx5::
$ echo mlx5:mlx5_esw_bridge_fdb_entry_refresh >> set_event
$ cat /sys/kernel/debug/tracing/trace
...
kworker/u20:8-3849 [003] ...1 466716: mlx5_esw_bridge_fdb_entry_refresh: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=3 flags=0 used=0
- mlx5_esw_bridge_vlan_create: trace bridge VLAN object add on mlx5
representor::
$ echo mlx5:mlx5_esw_bridge_vlan_create >> set_event
$ cat /sys/kernel/debug/tracing/trace
...
ip-2560 [007] ...1 318.460258: mlx5_esw_bridge_vlan_create: vid=1 flags=6
- mlx5_esw_bridge_vlan_cleanup: trace bridge VLAN object delete from mlx5
representor::
$ echo mlx5:mlx5_esw_bridge_vlan_cleanup >> set_event
$ cat /sys/kernel/debug/tracing/trace
...
bridge-2582 [007] ...1 318.653496: mlx5_esw_bridge_vlan_cleanup: vid=2 flags=8
- mlx5_esw_bridge_vport_init: trace mlx5 vport assigned with bridge upper
device::
$ echo mlx5:mlx5_esw_bridge_vport_init >> set_event
$ cat /sys/kernel/debug/tracing/trace
...
ip-2560 [007] ...1 318.458915: mlx5_esw_bridge_vport_init: vport_num=1
- mlx5_esw_bridge_vport_cleanup: trace mlx5 vport removed from bridge upper
device::
$ echo mlx5:mlx5_esw_bridge_vport_cleanup >> set_event
$ cat /sys/kernel/debug/tracing/trace
...
ip-5387 [000] ...1 573713: mlx5_esw_bridge_vport_cleanup: vport_num=1


@ -2280,6 +2280,7 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
u8 ft_type, u8 dv_prt,
void *in, size_t len)
{
struct mlx5_pkt_reformat_params reformat_params;
enum mlx5_flow_namespace_type namespace;
u8 prm_prt;
int ret;
@ -2292,9 +2293,13 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
if (ret)
return ret;
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = prm_prt;
reformat_params.size = len;
reformat_params.data = in;
maction->flow_action_raw.pkt_reformat =
mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
in, namespace);
mlx5_packet_reformat_alloc(dev->mdev, &reformat_params,
namespace);
if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
return ret;


@ -79,6 +79,16 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
config MLX5_BRIDGE
bool
depends on MLX5_ESWITCH && BRIDGE
default y
help
mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
Enable adding representors of mlx5 uplink and VF ports to Bridge and
offloading rules for traffic between such ports. Supports VLANs (trunk and
access modes).
config MLX5_CLS_ACT
bool "MLX5 TC classifier action support"
depends on MLX5_ESWITCH && NET_CLS_ACT


@ -56,6 +56,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
esw/devlink_port.o esw/vporttbl.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o
mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o


@ -0,0 +1,424 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"
#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000
struct mlx5_bridge_switchdev_fdb_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
bool add;
};
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
struct mlx5_esw_bridge_offloads,
netdev_nb);
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
struct net_device *upper;
struct mlx5e_priv *priv;
u16 vport_num;
if (!mlx5e_eswitch_rep(dev))
return 0;
upper = info->upper_dev;
if (!netif_is_bridge_master(upper))
return 0;
esw = br_offloads->esw;
priv = netdev_priv(dev);
if (esw != priv->mdev->priv.eswitch)
return 0;
rpriv = priv->ppriv;
vport_num = rpriv->rep->vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
extack = netdev_notifier_info_to_extack(&info->info);
return info->linking ?
mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) :
mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack);
}
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
int err = 0;
switch (event) {
case NETDEV_PRECHANGEUPPER:
break;
case NETDEV_CHANGEUPPER:
err = mlx5_esw_bridge_port_changeupper(nb, ptr);
break;
}
return notifier_from_errno(err);
}
static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
const struct switchdev_obj_port_vlan *vlan;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
struct mlx5e_priv *priv;
u16 vport_num;
int err = 0;
priv = netdev_priv(dev);
rpriv = priv->ppriv;
vport_num = rpriv->rep->vport;
esw = priv->mdev->priv.eswitch;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack);
break;
default:
return -EOPNOTSUPP;
}
return err;
}
static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
const struct switchdev_obj_port_vlan *vlan;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
struct mlx5e_priv *priv;
u16 vport_num;
priv = netdev_priv(dev);
rpriv = priv->ppriv;
vport_num = rpriv->rep->vport;
esw = priv->mdev->priv.eswitch;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
struct mlx5e_priv *priv;
u16 vport_num;
int err = 0;
priv = netdev_priv(dev);
rpriv = priv->ppriv;
vport_num = rpriv->rep->vport;
esw = priv->mdev->priv.eswitch;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
err = -EINVAL;
}
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport);
break;
default:
err = -EOPNOTSUPP;
}
return err;
}
static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
err = switchdev_handle_port_obj_add(dev, ptr,
mlx5e_eswitch_rep,
mlx5_esw_bridge_port_obj_add);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = switchdev_handle_port_obj_del(dev, ptr,
mlx5e_eswitch_rep,
mlx5_esw_bridge_port_obj_del);
break;
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
mlx5e_eswitch_rep,
mlx5_esw_bridge_port_obj_attr_set);
break;
default:
err = 0;
}
return notifier_from_errno(err);
}
static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
dev_put(fdb_work->dev);
kfree(fdb_work->fdb_info.addr);
kfree(fdb_work);
}
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
struct mlx5_bridge_switchdev_fdb_work *fdb_work =
container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
struct switchdev_notifier_fdb_info *fdb_info =
&fdb_work->fdb_info;
struct net_device *dev = fdb_work->dev;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
struct mlx5e_priv *priv;
u16 vport_num;
rtnl_lock();
priv = netdev_priv(dev);
rpriv = priv->ppriv;
vport_num = rpriv->rep->vport;
esw = priv->mdev->priv.eswitch;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
goto out;
if (fdb_work->add)
mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info);
else
mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info);
out:
rtnl_unlock();
mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
struct switchdev_notifier_fdb_info *fdb_info)
{
struct mlx5_bridge_switchdev_fdb_work *work;
u8 *addr;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return ERR_PTR(-ENOMEM);
INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
if (!addr) {
kfree(work);
return ERR_PTR(-ENOMEM);
}
ether_addr_copy(addr, fdb_info->addr);
work->fdb_info.addr = addr;
dev_hold(dev);
work->dev = dev;
work->add = add;
return work;
}
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
struct mlx5_esw_bridge_offloads,
nb);
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct switchdev_notifier_fdb_info *fdb_info;
struct mlx5_bridge_switchdev_fdb_work *work;
struct switchdev_notifier_info *info = ptr;
struct net_device *upper;
struct mlx5e_priv *priv;
if (!mlx5e_eswitch_rep(dev))
return NOTIFY_DONE;
priv = netdev_priv(dev);
if (priv->mdev->priv.eswitch != br_offloads->esw)
return NOTIFY_DONE;
if (event == SWITCHDEV_PORT_ATTR_SET) {
int err = switchdev_handle_port_attr_set(dev, ptr,
mlx5e_eswitch_rep,
mlx5_esw_bridge_port_obj_attr_set);
return notifier_from_errno(err);
}
upper = netdev_master_upper_dev_get_rcu(dev);
if (!upper)
return NOTIFY_DONE;
if (!netif_is_bridge_master(upper))
return NOTIFY_DONE;
switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);
work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
event == SWITCHDEV_FDB_ADD_TO_DEVICE,
fdb_info);
if (IS_ERR(work)) {
WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
PTR_ERR(work));
return notifier_from_errno(PTR_ERR(work));
}
queue_work(br_offloads->wq, &work->work);
break;
default:
break;
}
return NOTIFY_DONE;
}
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
struct mlx5_esw_bridge_offloads,
update_work.work);
rtnl_lock();
mlx5_esw_bridge_update(br_offloads);
rtnl_unlock();
queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw =
mdev->priv.eswitch;
int err;
rtnl_lock();
br_offloads = mlx5_esw_bridge_init(esw);
rtnl_unlock();
if (IS_ERR(br_offloads)) {
esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
return;
}
br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
if (!br_offloads->wq) {
esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
goto err_alloc_wq;
}
INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
err = register_switchdev_notifier(&br_offloads->nb);
if (err) {
esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
goto err_register_swdev;
}
br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
if (err) {
esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
goto err_register_swdev_blk;
}
br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
err = register_netdevice_notifier(&br_offloads->netdev_nb);
if (err) {
esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
err);
goto err_register_netdev;
}
return;
err_register_netdev:
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
mlx5_esw_bridge_cleanup(esw);
}
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw =
mdev->priv.eswitch;
br_offloads = esw->br_offloads;
if (!br_offloads)
return;
unregister_netdevice_notifier(&br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
cancel_delayed_work(&br_offloads->update_work);
destroy_workqueue(br_offloads->wq);
rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
rtnl_unlock();
}


@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */
#ifndef __MLX5_EN_REP_BRIDGE__
#define __MLX5_EN_REP_BRIDGE__
#include "en.h"
#if IS_ENABLED(CONFIG_MLX5_BRIDGE)
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv);
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv);
#else /* CONFIG_MLX5_BRIDGE */
static inline void mlx5e_rep_bridge_init(struct mlx5e_priv *priv) {}
static inline void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) {}
#endif /* CONFIG_MLX5_BRIDGE */
#endif /* __MLX5_EN_REP_BRIDGE__ */


@ -212,6 +212,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_neigh m_neigh = {};
TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
@ -295,9 +296,12 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
*/
goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
ipv4_encap_size, encap_header,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv4_encap_size;
reformat_params.data = encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@ -324,6 +328,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct mlx5_pkt_reformat_params reformat_params;
TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
char *encap_header;
@ -396,9 +401,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
*/
goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
ipv4_encap_size, encap_header,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv4_encap_size;
reformat_params.data = encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@ -471,6 +479,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_neigh m_neigh = {};
TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
@ -553,9 +562,11 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
ipv6_encap_size, encap_header,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv6_encap_size;
reformat_params.data = encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@ -582,6 +593,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct mlx5_pkt_reformat_params reformat_params;
TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
int ipv6_encap_size;
@ -654,9 +666,11 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
ipv6_encap_size, encap_header,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv6_encap_size;
reformat_params.data = encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);


@ -120,6 +120,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_pkt_reformat_params reformat_params;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_attr *attr;
@ -130,9 +131,12 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE)
return;
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = e->encap_size;
reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
e->encap_size, e->encap_header,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
@ -812,6 +816,7 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
@ -853,10 +858,12 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
mutex_unlock(&esw->offloads.decap_tbl_lock);
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
reformat_params.size = sizeof(parse_attr->eth);
reformat_params.data = &parse_attr->eth;
d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
sizeof(parse_attr->eth),
&parse_attr->eth,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(d->pkt_reformat)) {
err = PTR_ERR(d->pkt_reformat);


@ -45,6 +45,7 @@
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
@ -536,13 +537,13 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_change_carrier = mlx5e_rep_change_carrier,
};
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
return netdev->netdev_ops == &mlx5e_netdev_ops &&
mlx5e_is_uplink_rep(netdev_priv(netdev));
}
bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
@ -981,6 +982,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
mlx5e_rep_neigh_init(rpriv);
mlx5e_rep_bridge_init(priv);
netdev->wanted_features |= NETIF_F_HW_TC;
@ -1002,6 +1004,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
netif_device_detach(priv->netdev);
rtnl_unlock();
mlx5e_rep_bridge_cleanup(priv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);


@ -231,9 +231,9 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev);
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev);
static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
{
return mlx5e_eswitch_vf_rep(netdev) ||
mlx5e_eswitch_uplink_rep(netdev);

File diff suppressed because it is too large


@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */
#ifndef __MLX5_ESW_BRIDGE_H__
#define __MLX5_ESW_BRIDGE_H__
#include <linux/notifier.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include "eswitch.h"
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_esw_bridge_offloads {
struct mlx5_eswitch *esw;
struct list_head bridges;
struct notifier_block netdev_nb;
struct notifier_block nb_blk;
struct notifier_block nb;
struct workqueue_struct *wq;
struct delayed_work update_work;
struct mlx5_flow_table *ingress_ft;
struct mlx5_flow_group *ingress_vlan_fg;
struct mlx5_flow_group *ingress_filter_fg;
struct mlx5_flow_group *ingress_mac_fg;
struct mlx5_flow_table *skip_ft;
};
struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw);
void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw);
int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_vport *vport, struct netlink_ext_ack *extack);
int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_vport *vport, struct netlink_ext_ack *extack);
void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
struct switchdev_notifier_fdb_info *fdb_info);
void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
struct switchdev_notifier_fdb_info *fdb_info);
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
struct mlx5_vport *vport, struct netlink_ext_ack *extack);
void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport);
#endif /* __MLX5_ESW_BRIDGE_H__ */


@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */
#ifndef _MLX5_ESW_BRIDGE_PRIVATE_
#define _MLX5_ESW_BRIDGE_PRIVATE_
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/xarray.h>
#include "fs_core.h"
struct mlx5_esw_bridge_fdb_key {
unsigned char addr[ETH_ALEN];
u16 vid;
};
enum {
MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
};
struct mlx5_esw_bridge_fdb_entry {
struct mlx5_esw_bridge_fdb_key key;
struct rhash_head ht_node;
struct net_device *dev;
struct list_head list;
struct list_head vlan_list;
u16 vport_num;
u16 flags;
struct mlx5_flow_handle *ingress_handle;
struct mlx5_fc *ingress_counter;
unsigned long lastuse;
struct mlx5_flow_handle *egress_handle;
struct mlx5_flow_handle *filter_handle;
};
struct mlx5_esw_bridge_vlan {
u16 vid;
u16 flags;
struct list_head fdb_list;
struct mlx5_pkt_reformat *pkt_reformat_push;
struct mlx5_pkt_reformat *pkt_reformat_pop;
};
struct mlx5_esw_bridge_port {
u16 vport_num;
struct xarray vlans;
};
#endif /* _MLX5_ESW_BRIDGE_PRIVATE_ */


@ -0,0 +1,113 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mlx5
#if !defined(_MLX5_ESW_BRIDGE_TRACEPOINT_) || defined(TRACE_HEADER_MULTI_READ)
#define _MLX5_ESW_BRIDGE_TRACEPOINT_
#include <linux/tracepoint.h>
#include "../bridge_priv.h"
DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
TP_ARGS(fdb),
TP_STRUCT__entry(
__array(char, dev_name, IFNAMSIZ)
__array(unsigned char, addr, ETH_ALEN)
__field(u16, vid)
__field(u16, flags)
__field(unsigned int, used)
),
TP_fast_assign(
strncpy(__entry->dev_name,
netdev_name(fdb->dev),
IFNAMSIZ);
memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
__entry->vid = fdb->key.vid;
__entry->flags = fdb->flags;
__entry->used = jiffies_to_msecs(jiffies - fdb->lastuse)
),
TP_printk("net_device=%s addr=%pM vid=%hu flags=%hx used=%u",
__entry->dev_name,
__entry->addr,
__entry->vid,
__entry->flags,
__entry->used / 1000)
);
DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
mlx5_esw_bridge_fdb_entry_init,
TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
TP_ARGS(fdb)
);
DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
mlx5_esw_bridge_fdb_entry_refresh,
TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
TP_ARGS(fdb)
);
DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
mlx5_esw_bridge_fdb_entry_cleanup,
TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
TP_ARGS(fdb)
);
DECLARE_EVENT_CLASS(mlx5_esw_bridge_vlan_template,
TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
TP_ARGS(vlan),
TP_STRUCT__entry(
__field(u16, vid)
__field(u16, flags)
),
TP_fast_assign(
__entry->vid = vlan->vid;
__entry->flags = vlan->flags;
),
TP_printk("vid=%hu flags=%hx",
__entry->vid,
__entry->flags)
);
DEFINE_EVENT(mlx5_esw_bridge_vlan_template,
mlx5_esw_bridge_vlan_create,
TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
TP_ARGS(vlan)
);
DEFINE_EVENT(mlx5_esw_bridge_vlan_template,
mlx5_esw_bridge_vlan_cleanup,
TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
TP_ARGS(vlan)
);
DECLARE_EVENT_CLASS(mlx5_esw_bridge_port_template,
TP_PROTO(const struct mlx5_esw_bridge_port *port),
TP_ARGS(port),
TP_STRUCT__entry(
__field(u16, vport_num)
),
TP_fast_assign(
__entry->vport_num = port->vport_num;
),
TP_printk("vport_num=%hu", __entry->vport_num)
);
DEFINE_EVENT(mlx5_esw_bridge_port_template,
mlx5_esw_bridge_vport_init,
TP_PROTO(const struct mlx5_esw_bridge_port *port),
TP_ARGS(port)
);
DEFINE_EVENT(mlx5_esw_bridge_port_template,
mlx5_esw_bridge_vport_cleanup,
TP_PROTO(const struct mlx5_esw_bridge_port *port),
TP_ARGS(port)
);
#endif
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH esw/diag
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE bridge_tracepoint
#include <trace/define_trace.h>


@ -150,6 +150,8 @@ enum mlx5_eswitch_vport_event {
MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
struct mlx5_esw_bridge;
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@ -178,6 +180,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
struct mlx5_esw_bridge *bridge;
};
struct mlx5_esw_indir_table;
@ -196,6 +199,7 @@ struct mlx5_eswitch_fdb {
struct offloads_fdb {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *tc_miss_table;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *send_to_vport_meta_grp;
@ -270,6 +274,8 @@ enum {
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};
struct mlx5_esw_bridge_offloads;
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@ -299,6 +305,7 @@ struct mlx5_eswitch {
u32 root_tsar_id;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_esw_offload offloads;
int mode;
u16 manager_vport;


@ -1634,7 +1634,21 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.slow_fdb = fdb;
err = esw_chains_create(esw, fdb);
/* Create empty TC-miss managed table. This allows plugging in following
* priorities without directly exposing their level 0 table to
* eswitch_offloads and passing it as miss_fdb to following call to
* esw_chains_create().
*/
memset(&ft_attr, 0, sizeof(ft_attr));
ft_attr.prio = FDB_TC_MISS;
esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
goto tc_miss_table_err;
}
err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
if (err) {
esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err;
@ -1779,6 +1793,8 @@ send_vport_meta_err:
send_vport_err:
esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
@ -1806,6 +1822,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_chains_destroy(esw, esw_chains(esw));
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,


@ -111,9 +111,7 @@ static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
}
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
int reformat_type,
size_t size,
void *reformat_data,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@ -701,9 +699,7 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
}
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
int reformat_type,
size_t size,
void *reformat_data,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@ -721,14 +717,14 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
if (size > max_encap_size) {
if (params->size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
size, max_encap_size);
params->size, max_encap_size);
return -EINVAL;
}
in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
GFP_KERNEL);
in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
params->size, GFP_KERNEL);
if (!in)
return -ENOMEM;
@ -737,15 +733,20 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat = MLX5_ADDR_OF(packet_reformat_context_in,
packet_reformat_context_in,
reformat_data);
inlen = reformat - (void *)in + size;
inlen = reformat - (void *)in + params->size;
MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_data_size, size);
reformat_data_size, params->size);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_type, reformat_type);
memcpy(reformat, reformat_data, size);
reformat_type, params->type);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_param_0, params->param_0);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_param_1, params->param_1);
if (params->data && params->size)
memcpy(reformat, params->data, params->size);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));


@ -77,9 +77,7 @@ struct mlx5_flow_cmds {
bool disconnect);
int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns,
int reformat_type,
size_t size,
void *reformat_data,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat);


@ -2780,6 +2780,18 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (err)
goto out_err;
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
}
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
}
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
@ -3165,9 +3177,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
EXPORT_SYMBOL(mlx5_modify_header_dealloc);
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
int reformat_type,
size_t size,
void *reformat_data,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_pkt_reformat *pkt_reformat;
@ -3183,9 +3193,8 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
return ERR_PTR(-ENOMEM);
pkt_reformat->ns_type = ns_type;
pkt_reformat->reformat_type = reformat_type;
err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
reformat_data, ns_type,
pkt_reformat->reformat_type = params->type;
err = root->cmds->packet_reformat_alloc(root, params, ns_type,
pkt_reformat);
if (err) {
kfree(pkt_reformat);


@ -148,6 +148,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
if (err)


@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "dr_types.h"
#include "dr_ste.h"
enum dr_action_domain {
DR_ACTION_DOMAIN_NIC_INGRESS,
@ -14,7 +15,8 @@ enum dr_action_domain {
enum dr_action_valid_state {
DR_ACTION_STATE_ERR,
DR_ACTION_STATE_NO_ACTION,
DR_ACTION_STATE_REFORMAT,
DR_ACTION_STATE_ENCAP,
DR_ACTION_STATE_DECAP,
DR_ACTION_STATE_MODIFY_HDR,
DR_ACTION_STATE_MODIFY_VLAN,
DR_ACTION_STATE_NON_TERM,
@ -31,26 +33,42 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
[DR_ACTION_STATE_REFORMAT] = {
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@ -60,6 +78,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@ -67,8 +88,11 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
@ -81,22 +105,24 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
[DR_ACTION_STATE_REFORMAT] = {
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
@ -104,15 +130,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
@ -125,25 +153,41 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_REFORMAT] = {
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@ -152,13 +196,19 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
@ -173,23 +223,25 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_REFORMAT] = {
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
@ -198,8 +250,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_NON_TERM] = {
@ -207,8 +260,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
@ -235,6 +289,9 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3:
*action_type = DR_ACTION_TYP_L2_TO_TNL_L3;
break;
case DR_ACTION_REFORMAT_TYP_INSERT_HDR:
*action_type = DR_ACTION_TYP_INSERT_HDR;
break;
default:
return -EINVAL;
}
@ -454,8 +511,13 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
attr.reformat_size = action->reformat->reformat_size;
attr.reformat_id = action->reformat->reformat_id;
if (rx_rule &&
!(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) {
mlx5dr_info(dmn, "Device doesn't support Encap on RX\n");
goto out_invalid_arg;
}
attr.reformat.size = action->reformat->size;
attr.reformat.id = action->reformat->id;
break;
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
@ -481,6 +543,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
break;
case DR_ACTION_TYP_INSERT_HDR:
attr.reformat.size = action->reformat->size;
attr.reformat.id = action->reformat->id;
attr.reformat.param_0 = action->reformat->param_0;
attr.reformat.param_1 = action->reformat->param_1;
break;
default:
goto out_invalid_arg;
}
@ -543,6 +611,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_MODIFY_HDR] = sizeof(struct mlx5dr_action_rewrite),
[DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport),
[DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan),
[DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
};
static struct mlx5dr_action *
@ -651,7 +720,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
if (reformat_action) {
reformat_req = true;
hw_dests[i].vport.reformat_id =
reformat_action->reformat->reformat_id;
reformat_action->reformat->id;
ref_actions[num_of_ref++] = reformat_action;
hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@ -758,11 +827,15 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
static int
dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
struct mlx5dr_domain *dmn,
u8 reformat_param_0,
u8 reformat_param_1,
size_t data_sz,
void *data)
{
if ((!data && data_sz) || (data && !data_sz) || reformat_type >
DR_ACTION_TYP_L2_TO_TNL_L3) {
if ((!data && data_sz) || (data && !data_sz) ||
((reformat_param_0 || reformat_param_1) &&
reformat_type != DR_ACTION_TYP_INSERT_HDR) ||
reformat_type > DR_ACTION_TYP_INSERT_HDR) {
mlx5dr_dbg(dmn, "Invalid reformat parameter!\n");
goto out_err;
}
@ -794,6 +867,7 @@ out_err:
static int
dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
u8 reformat_param_0, u8 reformat_param_1,
size_t data_sz, void *data,
struct mlx5dr_action *action)
{
@ -811,13 +885,14 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
else
rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data,
ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, 0, 0,
data_sz, data,
&reformat_id);
if (ret)
return ret;
action->reformat->reformat_id = reformat_id;
action->reformat->reformat_size = data_sz;
action->reformat->id = reformat_id;
action->reformat->size = data_sz;
return 0;
}
case DR_ACTION_TYP_TNL_L2_TO_L2:
@ -859,6 +934,23 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
return 0;
}
case DR_ACTION_TYP_INSERT_HDR:
{
ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev,
MLX5_REFORMAT_TYPE_INSERT_HDR,
reformat_param_0,
reformat_param_1,
data_sz, data,
&reformat_id);
if (ret)
return ret;
action->reformat->id = reformat_id;
action->reformat->size = data_sz;
action->reformat->param_0 = reformat_param_0;
action->reformat->param_1 = reformat_param_1;
return 0;
}
default:
mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
return -EINVAL;
@ -896,6 +988,8 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
struct mlx5dr_action *
mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
enum mlx5dr_action_reformat_type reformat_type,
u8 reformat_param_0,
u8 reformat_param_1,
size_t data_sz,
void *data)
{
@ -912,7 +1006,9 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
goto dec_ref;
}
ret = dr_action_verify_reformat_params(action_type, dmn, data_sz, data);
ret = dr_action_verify_reformat_params(action_type, dmn,
reformat_param_0, reformat_param_1,
data_sz, data);
if (ret)
goto dec_ref;
@ -923,6 +1019,8 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
action->reformat->dmn = dmn;
ret = dr_action_create_reformat_action(dmn,
reformat_param_0,
reformat_param_1,
data_sz,
data,
action);
@ -1516,8 +1614,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
case DR_ACTION_TYP_INSERT_HDR:
mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
action->reformat->reformat_id);
action->reformat->id);
refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_MODIFY_HDR:


@ -460,6 +460,8 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
enum mlx5_reformat_ctx_type rt,
u8 reformat_param_0,
u8 reformat_param_1,
size_t reformat_size,
void *reformat_data,
u32 *reformat_id)
@ -486,8 +488,11 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
memcpy(pdata, reformat_data, reformat_size);
if (reformat_data && reformat_size)
memcpy(pdata, reformat_data, reformat_size);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)


@ -156,6 +156,7 @@ struct mlx5dr_ste_ctx {
u16 (*get_byte_mask)(u8 *hw_ste_p);
/* Actions */
u32 actions_caps;
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *hw_ste_arr,


@ -437,8 +437,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
attr->gvmi);
dr_ste_v0_set_tx_encap(last_ste,
attr->reformat_id,
attr->reformat_size,
attr->reformat.id,
attr->reformat.size,
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
/* Whenever prio_tag_required enabled, we can be sure that the
* previous table (ACL) already push vlan to our packet,
@ -1893,6 +1893,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.get_byte_mask = &dr_ste_v0_get_byte_mask,
/* Actions */
.actions_caps = DR_STE_CTX_ACTION_CAP_NONE,
.set_actions_rx = &dr_ste_v0_set_actions_rx,
.set_actions_tx = &dr_ste_v0_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),


@ -116,6 +116,8 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
@ -246,6 +248,12 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
},
[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
},
};
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
@ -361,8 +369,8 @@ static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
u32 reformat_id, int size)
static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@ -374,6 +382,26 @@ static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
u32 reformat_id,
u8 anchor, u8 offset,
int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);
/* The hardware expects here size and offset in words (2 byte) */
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
u32 vlan_hdr)
{
@ -401,11 +429,11 @@ static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_tx_encap_l3(u8 *hw_ste_p,
u8 *frst_s_action,
u8 *scnd_d_action,
u32 reformat_id,
int size)
static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
u8 *frst_s_action,
u8 *scnd_d_action,
u32 reformat_id,
int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@ -519,9 +547,9 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
dr_ste_v1_set_tx_encap(last_ste, action,
attr->reformat_id,
attr->reformat_size);
dr_ste_v1_set_encap(last_ste, action,
attr->reformat.id,
attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@ -532,12 +560,25 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
d_action = action + DR_STE_ACTION_SINGLE_SZ;
dr_ste_v1_set_tx_encap_l3(last_ste,
action, d_action,
attr->reformat_id,
attr->reformat_size);
dr_ste_v1_set_encap_l3(last_ste,
action, d_action,
attr->reformat.id,
attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_insert_hdr(last_ste, action,
attr->reformat.id,
attr->reformat.param_0,
attr->reformat.param_1,
attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
@ -616,7 +657,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
}
if (action_type_set[DR_ACTION_TYP_CTR]) {
/* Counter action set after decap to exclude decaped header */
/* Counter action set after decap and before insert_hdr
* to exclude decaped / encaped header respectively.
*/
if (!allow_ctr) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
@ -627,6 +670,52 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
}
if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_encap(last_ste, action,
attr->reformat.id,
attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
u8 *d_action;
if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
dr_ste_v1_set_encap_l3(last_ste,
action, d_action,
attr->reformat.id,
attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
/* Modify header, decap, and encap must use different STEs */
if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_insert_hdr(last_ste, action,
attr->reformat.id,
attr->reformat.param_0,
attr->reformat.param_1,
attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@ -1865,6 +1954,7 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_byte_mask = &dr_ste_v1_set_byte_mask,
.get_byte_mask = &dr_ste_v1_get_byte_mask,
/* Actions */
.actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
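
In dr_ste_v1_set_insert_hdr() above, the insert-with-pointer action encodes both the header
size and the start offset in 2-byte words, so the byte values coming from the reformat
context are halved before being programmed. As a rough worked example with illustrative
values, inserting an 8-byte header 14 bytes after the chosen anchor turns the two stores in
the function body into:

	/* 8 bytes -> 4 words, 14 bytes -> 7 words */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, 8 / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, 14 / 2);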


@ -89,6 +89,11 @@ enum {
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};
enum mlx5dr_ste_ctx_action_cap {
DR_STE_CTX_ACTION_CAP_NONE = 0,
DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0,
};
enum {
DR_MODIFY_ACTION_SIZE = 8,
};
@ -118,6 +123,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_VPORT,
DR_ACTION_TYP_POP_VLAN,
DR_ACTION_TYP_PUSH_VLAN,
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_MAX,
};
@ -261,8 +267,12 @@ struct mlx5dr_ste_actions_attr {
u32 ctr_id;
u16 gvmi;
u16 hit_gvmi;
u32 reformat_id;
u32 reformat_size;
struct {
u32 id;
u32 size;
u8 param_0;
u8 param_1;
} reformat;
struct {
int count;
u32 headers[MLX5DR_MAX_VLANS];
@ -903,8 +913,10 @@ struct mlx5dr_action_rewrite {
struct mlx5dr_action_reformat {
struct mlx5dr_domain *dmn;
u32 reformat_id;
u32 reformat_size;
u32 id;
u32 size;
u8 param_0;
u8 param_1;
};
struct mlx5dr_action_dest_tbl {
@ -1142,6 +1154,8 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
struct mlx5dr_cmd_query_flow_table_details *output);
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
enum mlx5_reformat_ctx_type rt,
u8 reformat_param_0,
u8 reformat_param_1,
size_t reformat_size,
void *reformat_data,
u32 *reformat_id);


@ -289,7 +289,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
tmp_action = mlx5dr_action_create_packet_reformat(domain,
decap_type, 0,
decap_type,
0, 0, 0,
NULL);
if (!tmp_action) {
err = -ENOMEM;
@ -522,9 +523,7 @@ out_err:
}
static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
int reformat_type,
size_t size,
void *reformat_data,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@ -532,7 +531,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
struct mlx5dr_action *action;
int dr_reformat;
switch (reformat_type) {
switch (params->type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
@ -544,16 +543,21 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
break;
case MLX5_REFORMAT_TYPE_INSERT_HDR:
dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
break;
default:
mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
reformat_type);
params->type);
return -EOPNOTSUPP;
}
action = mlx5dr_action_create_packet_reformat(dr_domain,
dr_reformat,
size,
reformat_data);
params->param_0,
params->param_1,
params->size,
params->data);
if (!action) {
mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
return -EINVAL;


@ -26,6 +26,7 @@ enum mlx5dr_action_reformat_type {
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2,
DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
DR_ACTION_REFORMAT_TYP_INSERT_HDR,
};
struct mlx5dr_match_parameters {
@ -105,6 +106,8 @@ mlx5dr_action_create_flow_counter(u32 counter_id);
struct mlx5dr_action *
mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
enum mlx5dr_action_reformat_type reformat_type,
u8 reformat_param_0,
u8 reformat_param_1,
size_t data_sz,
void *data);
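
The two new u8 arguments to mlx5dr_action_create_packet_reformat() carry the insert anchor
(reformat_param_0) and the byte offset from that anchor (reformat_param_1); per the
verification hunk earlier they are only accepted for DR_ACTION_REFORMAT_TYP_INSERT_HDR.
A hedged usage sketch, with illustrative anchor, offset and buffer (the anchor constant is
defined later in this series):

	struct mlx5dr_action *action;
	u8 hdr[8] = {};		/* illustrative buffer to insert */

	action = mlx5dr_action_create_packet_reformat(dmn,
						      DR_ACTION_REFORMAT_TYP_INSERT_HDR,
						      MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START,
						      0,	/* offset from the anchor, in bytes */
						      sizeof(hdr), hdr);
	if (!action)
		return -EINVAL;	/* NULL on failure, as the fs_dr hunk above checks */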


@ -1179,6 +1179,7 @@ enum mlx5_cap_type {
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_GENERAL_2 = 0x20,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@ -1220,6 +1221,15 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_GEN_MAX(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_2(mdev, cap) \
MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
#define MLX5_CAP_GEN_2_64(mdev, cap) \
MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
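
The MLX5_CAP_GEN_2* accessors read the "HCA capabilities 2" page whose query was added in the
earlier fw.c hunk. A minimal sketch of bounds-checking an insert-header request against the
limits exposed there (the max_reformat_insert_* fields are added to
mlx5_ifc_cmd_hca_cap_2_bits further below; the helper name is illustrative):

	static bool example_insert_hdr_fits(struct mlx5_core_dev *mdev,
					    u8 offset, size_t size)
	{
		return MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) >= size &&
		       MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) >= offset;
	}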


@ -87,6 +87,8 @@ enum {
FDB_BYPASS_PATH,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
FDB_TC_MISS,
FDB_BR_OFFLOAD,
FDB_SLOW_PATH,
FDB_PER_VPORT,
};
@ -254,10 +256,16 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
struct mlx5_pkt_reformat_params {
int type;
u8 param_0;
u8 param_1;
size_t size;
void *data;
};
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
int reformat_type,
size_t size,
void *reformat_data,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
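
With this hunk the old (reformat type, size, data) argument triple is folded into
struct mlx5_pkt_reformat_params, so the two new insert-header parameters can be passed
through without widening every prototype again. A hedged sketch of allocating an
INSERT_HDR reformat that places a 4-byte header right after the L2 source MAC (the header
contents and offset are illustrative):

	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_pkt_reformat *pkt_reformat;
	u8 new_hdr[4] = {};	/* illustrative 4-byte header to insert */

	reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
	reformat_params.param_1 = 12;	/* insert after DMAC + SMAC, offset in bytes */
	reformat_params.size = sizeof(new_hdr);
	reformat_params.data = new_hdr;

	pkt_reformat = mlx5_packet_reformat_alloc(dev, &reformat_params,
						  MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);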


@ -435,7 +435,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_40[0x20];
u8 reserved_at_60[0x18];
u8 reserved_at_60[0x2];
u8 reformat_insert[0x1];
u8 reformat_remove[0x1];
u8 reserver_at_64[0x14];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@ -1312,7 +1315,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x1f];
u8 vhca_resource_manager[0x1];
u8 reserved_at_20[0x3];
u8 hca_cap_2[0x1];
u8 reserved_at_21[0x2];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
@ -1732,6 +1736,17 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_7c0[0x40];
};
struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0xa0];
u8 max_reformat_insert_size[0x8];
u8 max_reformat_insert_offset[0x8];
u8 max_reformat_remove_size[0x8];
u8 max_reformat_remove_offset[0x8];
u8 reserved_at_c0[0x740];
};
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
@ -3105,6 +3120,7 @@ struct mlx5_ifc_roce_addr_layout_bits {
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;
@ -5785,12 +5801,14 @@ struct mlx5_ifc_query_eq_in_bits {
};
struct mlx5_ifc_packet_reformat_context_in_bits {
u8 reserved_at_0[0x5];
u8 reformat_type[0x3];
u8 reserved_at_8[0xe];
u8 reformat_type[0x8];
u8 reserved_at_8[0x4];
u8 reformat_param_0[0x4];
u8 reserved_at_10[0x6];
u8 reformat_data_size[0xa];
u8 reserved_at_20[0x10];
u8 reformat_param_1[0x8];
u8 reserved_at_28[0x8];
u8 reformat_data[2][0x8];
u8 more_reformat_data[][0x8];
@ -5830,12 +5848,20 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 reserved_at_60[0x20];
};
enum {
MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
};
enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@ -5956,6 +5982,8 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {