// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * The VLAN and bridge model the GSWIP hardware uses does not directly
 * match the model DSA uses.
 *
 * The hardware has 64 possible table entries for bridges with one VLAN
 * ID, one flow id and a list of ports for each bridge. All entries which
 * match the same flow ID are combined in the mac learning table, they
 * act as one global bridge.
 * The hardware does not support VLAN filter on the port, but on the
 * bridge, this driver converts the DSA model to the hardware.
 *
 * The CPU gets all the exception frames which do not match any forwarding
 * rule and the CPU port is also added to all bridges. This makes it possible
 * to handle all the special cases easily in software.
 * At the initialization the driver allocates one bridge table entry for
 * each switch port which is used when the port is used without an
 * explicit bridge. This prevents the frames from being forwarded
 * between all LAN ports by default.
 */
# include <linux/clk.h>
2020-11-15 19:57:57 +03:00
# include <linux/delay.h>
2018-09-09 23:20:39 +03:00
# include <linux/etherdevice.h>
# include <linux/firmware.h>
# include <linux/if_bridge.h>
# include <linux/if_vlan.h>
# include <linux/iopoll.h>
# include <linux/mfd/syscon.h>
# include <linux/module.h>
# include <linux/of_mdio.h>
# include <linux/of_net.h>
# include <linux/of_platform.h>
# include <linux/phy.h>
# include <linux/phylink.h>
# include <linux/platform_device.h>
# include <linux/regmap.h>
# include <linux/reset.h>
# include <net/dsa.h>
# include <dt-bindings/mips/lantiq_rcu_gphy.h>
# include "lantiq_pce.h"
/* GSWIP MDIO Registers */
# define GSWIP_MDIO_GLOB 0x00
# define GSWIP_MDIO_GLOB_ENABLE BIT(15)
# define GSWIP_MDIO_CTRL 0x08
# define GSWIP_MDIO_CTRL_BUSY BIT(12)
# define GSWIP_MDIO_CTRL_RD BIT(11)
# define GSWIP_MDIO_CTRL_WR BIT(10)
# define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
# define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
# define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
# define GSWIP_MDIO_READ 0x09
# define GSWIP_MDIO_WRITE 0x0A
# define GSWIP_MDIO_MDC_CFG0 0x0B
# define GSWIP_MDIO_MDC_CFG1 0x0C
# define GSWIP_MDIO_PHYp(p) (0x15 - (p))
# define GSWIP_MDIO_PHY_LINK_MASK 0x6000
# define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
# define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
# define GSWIP_MDIO_PHY_LINK_UP 0x2000
# define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
# define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
# define GSWIP_MDIO_PHY_SPEED_M10 0x0000
# define GSWIP_MDIO_PHY_SPEED_M100 0x0800
# define GSWIP_MDIO_PHY_SPEED_G1 0x1000
# define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
# define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
# define GSWIP_MDIO_PHY_FDUP_EN 0x0200
# define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
# define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
# define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
# define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
# define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
# define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
# define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
# define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
# define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
# define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
# define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
GSWIP_MDIO_PHY_FCONRX_MASK | \
GSWIP_MDIO_PHY_FCONTX_MASK | \
GSWIP_MDIO_PHY_LINK_MASK | \
GSWIP_MDIO_PHY_SPEED_MASK | \
GSWIP_MDIO_PHY_FDUP_MASK )
/* GSWIP MII Registers */
2021-01-03 04:25:44 +03:00
# define GSWIP_MII_CFGp(p) (0x2 * (p))
2021-04-08 21:38:28 +03:00
# define GSWIP_MII_CFG_RESET BIT(15)
2018-09-09 23:20:39 +03:00
# define GSWIP_MII_CFG_EN BIT(14)
2021-04-08 21:38:28 +03:00
# define GSWIP_MII_CFG_ISOLATE BIT(13)
2018-09-09 23:20:39 +03:00
# define GSWIP_MII_CFG_LDCLKDIS BIT(12)
2021-04-08 21:38:28 +03:00
# define GSWIP_MII_CFG_RGMII_IBS BIT(8)
# define GSWIP_MII_CFG_RMII_CLK BIT(7)
2018-09-09 23:20:39 +03:00
# define GSWIP_MII_CFG_MODE_MIIP 0x0
# define GSWIP_MII_CFG_MODE_MIIM 0x1
# define GSWIP_MII_CFG_MODE_RMIIP 0x2
# define GSWIP_MII_CFG_MODE_RMIIM 0x3
# define GSWIP_MII_CFG_MODE_RGMII 0x4
2021-03-22 23:37:15 +03:00
# define GSWIP_MII_CFG_MODE_GMII 0x9
2018-09-09 23:20:39 +03:00
# define GSWIP_MII_CFG_MODE_MASK 0xf
# define GSWIP_MII_CFG_RATE_M2P5 0x00
# define GSWIP_MII_CFG_RATE_M25 0x10
# define GSWIP_MII_CFG_RATE_M125 0x20
# define GSWIP_MII_CFG_RATE_M50 0x30
# define GSWIP_MII_CFG_RATE_AUTO 0x40
# define GSWIP_MII_CFG_RATE_MASK 0x70
# define GSWIP_MII_PCDU0 0x01
# define GSWIP_MII_PCDU1 0x03
# define GSWIP_MII_PCDU5 0x05
# define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
# define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
/* GSWIP Core Registers */
# define GSWIP_SWRES 0x000
# define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
# define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
# define GSWIP_VERSION 0x013
# define GSWIP_VERSION_REV_SHIFT 0
# define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
# define GSWIP_VERSION_MOD_SHIFT 8
# define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
# define GSWIP_VERSION_2_0 0x100
# define GSWIP_VERSION_2_1 0x021
# define GSWIP_VERSION_2_2 0x122
# define GSWIP_VERSION_2_2_ETC 0x022
# define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
# define GSWIP_BM_RAM_ADDR 0x044
# define GSWIP_BM_RAM_CTRL 0x045
# define GSWIP_BM_RAM_CTRL_BAS BIT(15)
# define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
# define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
# define GSWIP_BM_QUEUE_GCTRL 0x04A
# define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
/* buffer management Port Configuration Register */
# define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
# define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
# define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingres Special Tag RMON count */
/* buffer management Port Control Register */
# define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
# define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
# define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
/* PCE */
# define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
# define GSWIP_PCE_TBL_MASK 0x448
# define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
# define GSWIP_PCE_TBL_ADDR 0x44E
# define GSWIP_PCE_TBL_CTRL 0x44F
# define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
# define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
# define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
# define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
# define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
# define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
# define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
# define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
# define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
# define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
# define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
# define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
# define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
# define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
# define GSWIP_PCE_GCTRL_0 0x456
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
# define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
2018-09-09 23:20:39 +03:00
# define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
# define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
# define GSWIP_PCE_GCTRL_1 0x457
# define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
# define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
# define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
# define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
# define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
# define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
2018-09-09 23:20:39 +03:00
# define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
# define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
# define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
# define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
# define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
# define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
# define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
# define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
# define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
# define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
# define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
# define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
# define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
2018-09-09 23:20:39 +03:00
# define GSWIP_MAC_FLEN 0x8C5
2021-04-08 21:38:27 +03:00
# define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
# define GSWIP_MAC_CTRL_0_PADEN BIT(8)
# define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
# define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
# define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
# define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
# define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
# define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
# define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
# define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
# define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
# define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
# define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
# define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
# define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
# define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
# define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
2018-09-09 23:20:39 +03:00
# define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
# define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */
/* Ethernet Switch Fetch DMA Port Control Register */
# define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
# define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
# define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
# define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
# define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
# define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
# define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
# define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
# define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
/* Ethernet Switch Store DMA Port Control Register */
#define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
/* NOTE(review): was BIT(1), which duplicated FCEN; the GSWIP databook
 * places PAUFWD (Pause Frame Forwarding) at bit 3 — confirm on hardware.
 */
#define GSWIP_SDMA_PCTRL_PAUFWD		BIT(3)	/* Pause Frame Forwarding */
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
# define GSWIP_TABLE_ACTIVE_VLAN 0x01
# define GSWIP_TABLE_VLAN_MAPPING 0x02
2019-05-06 01:25:09 +03:00
# define GSWIP_TABLE_MAC_BRIDGE 0x0b
# define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static not, aging entry */
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
2018-09-09 23:20:39 +03:00
# define XRX200_GPHY_FW_ALIGN (16 * 1024)
/* Per-SoC description of the switch: how many ports it has, which one
 * is wired to the CPU, and the dsa_switch_ops variant to use for it.
 */
struct gswip_hw_info {
	int max_ports;			/* number of switch ports */
	int cpu_port;			/* index of the port facing the CPU */
	const struct dsa_switch_ops *ops;
};
struct xway_gphy_match_data {
char * fe_firmware_name ;
char * ge_firmware_name ;
} ;
/* Everything needed to load firmware into one embedded GPHY */
struct gswip_gphy_fw {
	struct clk *clk_gate;		/* clock gate of this GPHY */
	struct reset_control *reset;	/* reset line of this GPHY */
	u32 fw_addr_offset;		/* register offset for the firmware
					 * load address (assumed; set at probe
					 * time — confirm against loader code)
					 */
	char *fw_name;			/* firmware blob to request */
};
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
/* Shadow of one hardware bridge entry (active VLAN table + VLAN mapping
 * table share the index). Entries with the same fid (flow id) share one
 * MAC learning domain and therefore act as one bridge.
 */
struct gswip_vlan {
	struct net_device *bridge;	/* bridge this entry belongs to */
	u16 vid;			/* VLAN ID; 0 means untagged */
	u8 fid;				/* flow id of the bridge */
};
2018-09-09 23:20:39 +03:00
/* Driver private state, one instance per switch */
struct gswip_priv {
	__iomem void *gswip;		/* switch core register block */
	__iomem void *mdio;		/* MDIO master register block */
	__iomem void *mii;		/* xMII register block */
	const struct gswip_hw_info *hw_info;
	const struct xway_gphy_match_data *gphy_fw_name_cfg;
	struct dsa_switch *ds;
	struct device *dev;
	struct regmap *rcu_regmap;
	/* shadow of the 64 hardware bridge/VLAN table entries */
	struct gswip_vlan vlans[64];
	int num_gphy_fw;		/* number of entries in gphy_fw */
	struct gswip_gphy_fw *gphy_fw;
	/* per-port VLAN filtering state (presumably a port bitmap —
	 * confirm against the users of this field)
	 */
	u32 port_vlan_filter;
};
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
/* In-memory image of one PCE table entry; transferred to/from the
 * hardware through the GSWIP_PCE_TBL_* register window.
 */
struct gswip_pce_table_entry {
	u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
	u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
	u16 key[8];	/* lookup key words */
	u16 val[5];	/* value words */
	u16 mask;
	u8 gmap;	/* port group map */
	bool type;
	bool valid;
	bool key_mode;
};
2018-09-09 23:20:39 +03:00
struct gswip_rmon_cnt_desc {
unsigned int size ;
unsigned int offset ;
const char * name ;
} ;
#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}

/* RMON counter layout of the switch, in ethtool reporting order */
static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};
/* Read a switch core register; offsets count 32-bit words. */
static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
{
	__iomem void *reg = priv->gswip + (offset * 4);

	return __raw_readl(reg);
}
/* Write a switch core register; offsets count 32-bit words. */
static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__iomem void *reg = priv->gswip + (offset * 4);

	__raw_writel(val, reg);
}
/* Read-modify-write a switch core register: clear @clear, then set @set. */
static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
			      u32 offset)
{
	u32 val;

	val = gswip_switch_r(priv, offset);
	val = (val & ~clear) | set;
	gswip_switch_w(priv, val, offset);
}
/* Poll a switch core register until all bits in @cleared read back as
 * zero. Polls every 20 us for up to 50 ms.
 *
 * Returns 0 on success or -ETIMEDOUT (as u32) on timeout.
 */
static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
				  (val & cleared) == 0, 20, 50000);
}
/* Read an MDIO master register; offsets count 32-bit words. */
static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
{
	__iomem void *reg = priv->mdio + (offset * 4);

	return __raw_readl(reg);
}
/* Write an MDIO master register; offsets count 32-bit words. */
static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__iomem void *reg = priv->mdio + (offset * 4);

	__raw_writel(val, reg);
}
/* Read-modify-write an MDIO master register: clear @clear, then set @set. */
static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
			    u32 offset)
{
	u32 val;

	val = gswip_mdio_r(priv, offset);
	val = (val & ~clear) | set;
	gswip_mdio_w(priv, val, offset);
}
/* Read an xMII register; offsets count 32-bit words. */
static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
{
	__iomem void *reg = priv->mii + (offset * 4);

	return __raw_readl(reg);
}
/* Write an xMII register; offsets count 32-bit words. */
static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__iomem void *reg = priv->mii + (offset * 4);

	__raw_writel(val, reg);
}
/* Read-modify-write an xMII register: clear @clear, then set @set. */
static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
			   u32 offset)
{
	u32 val;

	val = gswip_mii_r(priv, offset);
	val = (val & ~clear) | set;
	gswip_mii_w(priv, val, offset);
}
/* Update the MII_CFG register of one port. The CPU port has no MII_CFG
 * register, so writes to it are silently skipped.
 */
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
			       int port)
{
	/* There's no MII_CFG register for the CPU port */
	if (!dsa_is_cpu_port(priv->ds, port))
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
/* Update the clock delay (PCDU) register of a port. Only ports 0, 1
 * and 5 have one; all other ports are ignored.
 */
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
				int port)
{
	u32 reg;

	switch (port) {
	case 0:
		reg = GSWIP_MII_PCDU0;
		break;
	case 1:
		reg = GSWIP_MII_PCDU1;
		break;
	case 5:
		reg = GSWIP_MII_PCDU5;
		break;
	default:
		return;
	}

	gswip_mii_mask(priv, clear, set, reg);
}
static int gswip_mdio_poll ( struct gswip_priv * priv )
{
int cnt = 100 ;
while ( likely ( cnt - - ) ) {
u32 ctrl = gswip_mdio_r ( priv , GSWIP_MDIO_CTRL ) ;
if ( ( ctrl & GSWIP_MDIO_CTRL_BUSY ) = = 0 )
return 0 ;
usleep_range ( 20 , 40 ) ;
}
return - ETIMEDOUT ;
}
/* mii_bus .write callback: write @val to register @reg of PHY @addr
 * through the switch-internal MDIO master. Waits for any previous
 * transaction to finish, then kicks off the write without waiting for
 * its completion (the next access polls for idle first).
 *
 * Returns 0 on success or a negative error code on timeout.
 */
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}
/* mii_bus .read callback: read register @reg of PHY @addr through the
 * switch-internal MDIO master. Waits for idle, starts the read, waits
 * for it to complete, then fetches the result from the READ register.
 *
 * Returns the register value or a negative error code on timeout.
 */
static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}
/* Allocate and register the switch-internal MDIO bus as the DSA slave
 * MII bus, so the PHYs described under the @mdio_np device tree node
 * can be probed.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
	struct dsa_switch *ds = priv->ds;

	ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
	if (!ds->slave_mii_bus)
		return -ENOMEM;

	ds->slave_mii_bus->priv = priv;
	ds->slave_mii_bus->read = gswip_mdio_rd;
	ds->slave_mii_bus->write = gswip_mdio_wr;
	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(priv->dev));
	ds->slave_mii_bus->parent = priv->dev;
	/* only allow probing of PHYs the DSA core reserved for us */
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

	return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
/* Read one entry of the PCE hardware table selected by tbl->table into
 * @tbl.  The access is key based (KSRD) when tbl->key_mode is set,
 * otherwise address based (ADRD) using tbl->index.
 *
 * The statement order mirrors the hardware access protocol: wait for
 * any previous access to finish, program address + opmode and set BAS
 * to start, wait for BAS to clear, then read back key/value/mask and
 * the control flags.  Returns 0 on success or a negative errno on
 * timeout.
 */
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
				      struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;

	/* wait until any in-flight table access has completed */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	/* select table and operation mode, BAS kicks off the access */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
			  GSWIP_PCE_TBL_CTRL);

	/* wait for the hardware to finish the read */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));

	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);

	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);

	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
	/* GMAP field lives at bits 7.. of the control register */
	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;

	return 0;
}
/* Write @tbl into the PCE hardware table selected by tbl->table.  The
 * access is key based (KSWR) when tbl->key_mode is set, otherwise
 * address based (ADWR) using tbl->index.
 *
 * The sequence (wait idle, program address/opmode, load key and value
 * registers, re-assert opmode, load mask, then set flags + BAS to
 * commit) follows the hardware access protocol and must not be
 * reordered.  Returns 0 on success or a negative errno on timeout.
 */
static int gswip_pce_table_entry_write(struct gswip_priv *priv,
				       struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;

	/* wait until any in-flight table access has completed */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));

	/* re-select table and operation mode before loading the mask */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);

	/* read-modify-write the flag bits, then set BAS to start the
	 * actual table write
	 */
	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
	if (tbl->type)
		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
	if (tbl->valid)
		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);

	return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				      GSWIP_PCE_TBL_CTRL_BAS);
}
/* Add the LAN port into a bridge with the CPU port by
 * default. This prevents automatic forwarding of
 * packages between the LAN ports when no explicit
 * bridge is configured.
 *
 * Each per-port single-port bridge uses entry index (port + 1) in both
 * the active VLAN table and the VLAN mapping table, with VLAN ID 0
 * (untagged) and a flow id (fid) of (port + 1) so that MAC learning on
 * different ports stays separated.  When @add is false only the active
 * VLAN entry is invalidated.
 *
 * Returns 0 on success or a negative errno.
 */
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	unsigned int max_ports = priv->hw_info->max_ports;
	int err;

	if (port >= max_ports) {
		dev_err(priv->dev, "single port for %i supported\n", port);
		return -EIO;
	}

	vlan_active.index = port + 1;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = 0; /* vid */
	vlan_active.val[0] = port + 1 /* fid */;
	vlan_active.valid = add;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	if (!add)
		return 0;

	/* forward only between this port and the CPU port */
	vlan_mapping.index = port + 1;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	vlan_mapping.val[0] = 0 /* vid */;
	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
	vlan_mapping.val[2] = 0;
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	return 0;
}
2018-09-09 23:20:39 +03:00
static int gswip_port_enable ( struct dsa_switch * ds , int port ,
struct phy_device * phydev )
{
struct gswip_priv * priv = ds - > priv ;
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
int err ;
2019-08-19 23:00:49 +03:00
if ( ! dsa_is_user_port ( ds , port ) )
return 0 ;
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
if ( ! dsa_is_cpu_port ( ds , port ) ) {
err = gswip_add_single_port_br ( priv , port , true ) ;
if ( err )
return err ;
}
2018-09-09 23:20:39 +03:00
/* RMON Counter Enable for port */
gswip_switch_w ( priv , GSWIP_BM_PCFG_CNTEN , GSWIP_BM_PCFGp ( port ) ) ;
/* enable port fetch/store dma & VLAN Modification */
gswip_switch_mask ( priv , 0 , GSWIP_FDMA_PCTRL_EN |
GSWIP_FDMA_PCTRL_VLANMOD_BOTH ,
GSWIP_FDMA_PCTRLp ( port ) ) ;
gswip_switch_mask ( priv , 0 , GSWIP_SDMA_PCTRL_EN ,
GSWIP_SDMA_PCTRLp ( port ) ) ;
if ( ! dsa_is_cpu_port ( ds , port ) ) {
2021-04-08 21:38:27 +03:00
u32 mdio_phy = 0 ;
if ( phydev )
mdio_phy = phydev - > mdio . addr & GSWIP_MDIO_PHY_ADDR_MASK ;
gswip_mdio_mask ( priv , GSWIP_MDIO_PHY_ADDR_MASK , mdio_phy ,
GSWIP_MDIO_PHYp ( port ) ) ;
2018-09-09 23:20:39 +03:00
}
return 0 ;
}
2019-02-24 22:44:43 +03:00
static void gswip_port_disable ( struct dsa_switch * ds , int port )
2018-09-09 23:20:39 +03:00
{
struct gswip_priv * priv = ds - > priv ;
2019-08-19 23:00:49 +03:00
if ( ! dsa_is_user_port ( ds , port ) )
return ;
2018-09-09 23:20:39 +03:00
gswip_switch_mask ( priv , GSWIP_FDMA_PCTRL_EN , 0 ,
GSWIP_FDMA_PCTRLp ( port ) ) ;
gswip_switch_mask ( priv , GSWIP_SDMA_PCTRL_EN , 0 ,
GSWIP_SDMA_PCTRLp ( port ) ) ;
}
/* Load the PCE parser microcode (gswip_pce_microcode[], defined
 * elsewhere in this file) into the switch, one 4-word row per table
 * address, and mark it valid.
 *
 * Returns 0 on success or a negative errno if a table access times
 * out.
 */
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	/* select address-write mode for the microcode table */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		/* wait for the hardware to consume this row */
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
static int gswip_port_vlan_filtering ( struct dsa_switch * ds , int port ,
2021-02-13 23:43:19 +03:00
bool vlan_filtering ,
struct netlink_ext_ack * extack )
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
{
net: switchdev: remove the transaction structure from port attributes
Since the introduction of the switchdev API, port attributes were
transmitted to drivers for offloading using a two-step transactional
model, with a prepare phase that was supposed to catch all errors, and a
commit phase that was supposed to never fail.
Some classes of failures can never be avoided, like hardware access, or
memory allocation. In the latter case, merely attempting to move the
memory allocation to the preparation phase makes it impossible to avoid
memory leaks, since commit 91cf8eceffc1 ("switchdev: Remove unused
transaction item queue") which has removed the unused mechanism of
passing on the allocated memory between one phase and another.
It is time we admit that separating the preparation from the commit
phase is something that is best left for the driver to decide, and not
something that should be baked into the API, especially since there are
no switchdev callers that depend on this.
This patch removes the struct switchdev_trans member from switchdev port
attribute notifier structures, and converts drivers to not look at this
member.
In part, this patch contains a revert of my previous commit 2e554a7a5d8a
("net: dsa: propagate switchdev vlan_filtering prepare phase to
drivers").
For the most part, the conversion was trivial except for:
- Rocker's world implementation based on Broadcom OF-DPA had an odd
implementation of ofdpa_port_attr_bridge_flags_set. The conversion was
done mechanically, by pasting the implementation twice, then only
keeping the code that would get executed during prepare phase on top,
then only keeping the code that gets executed during the commit phase
on bottom, then simplifying the resulting code until this was obtained.
- DSA's offloading of STP state, bridge flags, VLAN filtering and
multicast router could be converted right away. But the ageing time
could not, so a shim was introduced and this was left for a further
commit.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Reviewed-by: Linus Walleij <linus.walleij@linaro.org> # RTL8366RB
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:50 +03:00
struct net_device * bridge = dsa_to_port ( ds , port ) - > bridge_dev ;
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
struct gswip_priv * priv = ds - > priv ;
2019-05-06 01:25:08 +03:00
/* Do not allow changing the VLAN filtering options while in bridge */
2021-02-13 23:43:19 +03:00
if ( bridge & & ! ! ( priv - > port_vlan_filter & BIT ( port ) ) ! = vlan_filtering ) {
NL_SET_ERR_MSG_MOD ( extack ,
" Dynamic toggling of vlan_filtering not supported " ) ;
net: switchdev: remove the transaction structure from port attributes
Since the introduction of the switchdev API, port attributes were
transmitted to drivers for offloading using a two-step transactional
model, with a prepare phase that was supposed to catch all errors, and a
commit phase that was supposed to never fail.
Some classes of failures can never be avoided, like hardware access, or
memory allocation. In the latter case, merely attempting to move the
memory allocation to the preparation phase makes it impossible to avoid
memory leaks, since commit 91cf8eceffc1 ("switchdev: Remove unused
transaction item queue") which has removed the unused mechanism of
passing on the allocated memory between one phase and another.
It is time we admit that separating the preparation from the commit
phase is something that is best left for the driver to decide, and not
something that should be baked into the API, especially since there are
no switchdev callers that depend on this.
This patch removes the struct switchdev_trans member from switchdev port
attribute notifier structures, and converts drivers to not look at this
member.
In part, this patch contains a revert of my previous commit 2e554a7a5d8a
("net: dsa: propagate switchdev vlan_filtering prepare phase to
drivers").
For the most part, the conversion was trivial except for:
- Rocker's world implementation based on Broadcom OF-DPA had an odd
implementation of ofdpa_port_attr_bridge_flags_set. The conversion was
done mechanically, by pasting the implementation twice, then only
keeping the code that would get executed during prepare phase on top,
then only keeping the code that gets executed during the commit phase
on bottom, then simplifying the resulting code until this was obtained.
- DSA's offloading of STP state, bridge flags, VLAN filtering and
multicast router could be converted right away. But the ageing time
could not, so a shim was introduced and this was left for a further
commit.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Reviewed-by: Linus Walleij <linus.walleij@linaro.org> # RTL8366RB
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:50 +03:00
return - EIO ;
2021-02-13 23:43:19 +03:00
}
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
if ( vlan_filtering ) {
/* Use port based VLAN tag */
gswip_switch_mask ( priv ,
GSWIP_PCE_VCTRL_VSR ,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR ,
GSWIP_PCE_VCTRL ( port ) ) ;
gswip_switch_mask ( priv , GSWIP_PCE_PCTRL_0_TVM , 0 ,
GSWIP_PCE_PCTRL_0p ( port ) ) ;
} else {
/* Use port based VLAN tag */
gswip_switch_mask ( priv ,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR ,
GSWIP_PCE_VCTRL_VSR ,
GSWIP_PCE_VCTRL ( port ) ) ;
gswip_switch_mask ( priv , 0 , GSWIP_PCE_PCTRL_0_TVM ,
GSWIP_PCE_PCTRL_0p ( port ) ) ;
}
return 0 ;
}
2018-09-09 23:20:39 +03:00
static int gswip_setup ( struct dsa_switch * ds )
{
struct gswip_priv * priv = ds - > priv ;
unsigned int cpu_port = priv - > hw_info - > cpu_port ;
int i ;
int err ;
gswip_switch_w ( priv , GSWIP_SWRES_R0 , GSWIP_SWRES ) ;
usleep_range ( 5000 , 10000 ) ;
gswip_switch_w ( priv , 0 , GSWIP_SWRES ) ;
/* disable port fetch/store dma on all ports */
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
for ( i = 0 ; i < priv - > hw_info - > max_ports ; i + + ) {
2019-02-24 22:44:43 +03:00
gswip_port_disable ( ds , i ) ;
2021-02-13 23:43:19 +03:00
gswip_port_vlan_filtering ( ds , i , false , NULL ) ;
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
}
2018-09-09 23:20:39 +03:00
/* enable Switch */
gswip_mdio_mask ( priv , 0 , GSWIP_MDIO_GLOB_ENABLE , GSWIP_MDIO_GLOB ) ;
err = gswip_pce_load_microcode ( priv ) ;
if ( err ) {
dev_err ( priv - > dev , " writing PCE microcode failed, %i " , err ) ;
return err ;
}
/* Default unknown Broadcast/Multicast/Unicast port maps */
gswip_switch_w ( priv , BIT ( cpu_port ) , GSWIP_PCE_PMAP1 ) ;
gswip_switch_w ( priv , BIT ( cpu_port ) , GSWIP_PCE_PMAP2 ) ;
gswip_switch_w ( priv , BIT ( cpu_port ) , GSWIP_PCE_PMAP3 ) ;
2021-04-08 21:38:27 +03:00
/* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
* interoperability problem with this auto polling mechanism because
* their status registers think that the link is in a different state
* than it actually is . For the AR8030 it has the BMSR_ESTATEN bit set
* as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL . This makes the
* auto polling state machine consider the link being negotiated with
* 1 Gbit / s . Since the PHY itself is a Fast Ethernet RMII PHY this leads
* to the switch port being completely dead ( RX and TX are both not
* working ) .
* Also with various other PHY / port combinations ( PHY11G GPHY , PHY22F
* GPHY , external RGMII PEF7071 / 7072 ) any traffic would stop . Sometimes
* it would work fine for a few minutes to hours and then stop , on
* other device it would no traffic could be sent or received at all .
* Testing shows that when PHY auto polling is disabled these problems
* go away .
*/
2018-09-09 23:20:39 +03:00
gswip_mdio_w ( priv , 0x0 , GSWIP_MDIO_MDC_CFG0 ) ;
2021-04-08 21:38:27 +03:00
2018-09-09 23:20:39 +03:00
/* Configure the MDIO Clock 2.5 MHz */
gswip_mdio_mask ( priv , 0xff , 0x09 , GSWIP_MDIO_MDC_CFG1 ) ;
2021-04-08 21:38:28 +03:00
/* Disable the xMII interface and clear it's isolation bit */
2021-01-03 04:25:44 +03:00
for ( i = 0 ; i < priv - > hw_info - > max_ports ; i + + )
2021-04-08 21:38:28 +03:00
gswip_mii_mask_cfg ( priv ,
GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE ,
0 , i ) ;
2018-09-09 23:20:39 +03:00
/* enable special tag insertion on cpu port */
gswip_switch_mask ( priv , 0 , GSWIP_FDMA_PCTRL_STEN ,
GSWIP_FDMA_PCTRLp ( cpu_port ) ) ;
2019-05-06 01:25:06 +03:00
/* accept special tag in ingress direction */
gswip_switch_mask ( priv , 0 , GSWIP_PCE_PCTRL_0_INGRESS ,
GSWIP_PCE_PCTRL_0p ( cpu_port ) ) ;
2018-09-09 23:20:39 +03:00
gswip_switch_mask ( priv , 0 , GSWIP_MAC_CTRL_2_MLEN ,
GSWIP_MAC_CTRL_2p ( cpu_port ) ) ;
2021-09-01 21:49:33 +03:00
gswip_switch_w ( priv , VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN ,
GSWIP_MAC_FLEN ) ;
2018-09-09 23:20:39 +03:00
gswip_switch_mask ( priv , 0 , GSWIP_BM_QUEUE_GCTRL_GL_MOD ,
GSWIP_BM_QUEUE_GCTRL ) ;
/* VLAN aware Switching */
gswip_switch_mask ( priv , 0 , GSWIP_PCE_GCTRL_0_VLAN , GSWIP_PCE_GCTRL_0 ) ;
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
/* Flush MAC Table */
gswip_switch_mask ( priv , 0 , GSWIP_PCE_GCTRL_0_MTFL , GSWIP_PCE_GCTRL_0 ) ;
err = gswip_switch_r_timeout ( priv , GSWIP_PCE_GCTRL_0 ,
GSWIP_PCE_GCTRL_0_MTFL ) ;
if ( err ) {
dev_err ( priv - > dev , " MAC flushing didn't finish \n " ) ;
return err ;
}
2018-09-09 23:20:39 +03:00
gswip_port_enable ( ds , cpu_port , NULL ) ;
net: dsa: set configure_vlan_while_not_filtering to true by default
As explained in commit 54a0ed0df496 ("net: dsa: provide an option for
drivers to always receive bridge VLANs"), DSA has historically been
skipping VLAN switchdev operations when the bridge wasn't in
vlan_filtering mode, but the reason why it was doing that has never been
clear. So the configure_vlan_while_not_filtering option is there merely
to preserve functionality for existing drivers. It isn't some behavior
that drivers should opt into. Ideally, when all drivers leave this flag
set, we can delete the dsa_port_skip_vlan_configuration() function.
New drivers always seem to omit setting this flag, for some reason. So
let's reverse the logic: the DSA core sets it by default to true before
the .setup() callback, and legacy drivers can turn it off. This way, new
drivers get the new behavior by default, unless they explicitly set the
flag to false, which is more obvious during review.
Remove the assignment from drivers which were setting it to true, and
add the assignment to false for the drivers that didn't previously have
it. This way, it should be easier to see how many we have left.
The following drivers: lan9303, mv88e6060 were skipped from setting this
flag to false, because they didn't have any VLAN offload ops in the
first place.
The Broadcom Starfighter 2 driver calls the common b53_switch_alloc and
therefore also inherits the configure_vlan_while_not_filtering=true
behavior.
Also, print a message through netlink extack every time a VLAN has been
skipped. This is mildly annoying on purpose, so that (a) it is at least
clear that VLANs are being skipped - the legacy behavior in itself is
confusing, and the extack should be much more difficult to miss, unlike
kernel logs - and (b) people have one more incentive to convert to the
new behavior.
No behavior change except for the added prints is intended at this time.
$ ip link add br0 type bridge vlan_filtering 0
$ ip link set sw0p2 master br0
[ 60.315148] br0: port 1(sw0p2) entered blocking state
[ 60.320350] br0: port 1(sw0p2) entered disabled state
[ 60.327839] device sw0p2 entered promiscuous mode
[ 60.334905] br0: port 1(sw0p2) entered blocking state
[ 60.340142] br0: port 1(sw0p2) entered forwarding state
Warning: dsa_core: skipping configuration of VLAN. # This was the pvid
$ bridge vlan add dev sw0p2 vid 100
Warning: dsa_core: skipping configuration of VLAN.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Link: https://lore.kernel.org/r/20210115231919.43834-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-16 02:19:19 +03:00
ds - > configure_vlan_while_not_filtering = false ;
2018-09-09 23:20:39 +03:00
return 0 ;
}
static enum dsa_tag_protocol gswip_get_tag_protocol ( struct dsa_switch * ds ,
2020-01-08 08:06:05 +03:00
int port ,
enum dsa_tag_protocol mp )
2018-09-09 23:20:39 +03:00
{
return DSA_TAG_PROTO_GSWIP ;
}
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
/* Program one entry in the hardware Active VLAN table.
 *
 * The slots below max_ports are reserved for the single port bridges,
 * so only the remaining slots are scanned for a free one. A flow ID of
 * -1 means "pick one for me", in which case the slot index doubles as
 * the flow ID.
 *
 * Returns the allocated table index, or a negative error code.
 */
static int gswip_vlan_active_create(struct gswip_priv *priv,
				    struct net_device *bridge,
				    int fid, u16 vid)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int slot = -1;
	int err;
	int i;

	/* Look for a free slot */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (!priv->vlans[i].bridge) {
			slot = i;
			break;
		}
	}

	if (slot == -1)
		return -ENOSPC;

	/* No flow ID requested: reuse the slot index as flow ID */
	if (fid == -1)
		fid = slot;

	vlan_active.index = slot;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = vid;
	vlan_active.val[0] = fid;
	vlan_active.valid = true;

	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	/* Remember the allocation in the shadow table */
	priv->vlans[slot].bridge = bridge;
	priv->vlans[slot].vid = vid;
	priv->vlans[slot].fid = fid;

	return slot;
}
static int gswip_vlan_active_remove ( struct gswip_priv * priv , int idx )
{
struct gswip_pce_table_entry vlan_active = { 0 , } ;
int err ;
vlan_active . index = idx ;
vlan_active . table = GSWIP_TABLE_ACTIVE_VLAN ;
vlan_active . valid = false ;
err = gswip_pce_table_entry_write ( priv , & vlan_active ) ;
if ( err )
dev_err ( priv - > dev , " failed to delete active VLAN: %d \n " , err ) ;
priv - > vlans [ idx ] . bridge = NULL ;
return err ;
}
static int gswip_vlan_add_unaware ( struct gswip_priv * priv ,
struct net_device * bridge , int port )
{
struct gswip_pce_table_entry vlan_mapping = { 0 , } ;
unsigned int max_ports = priv - > hw_info - > max_ports ;
unsigned int cpu_port = priv - > hw_info - > cpu_port ;
bool active_vlan_created = false ;
int idx = - 1 ;
int i ;
int err ;
/* Check if there is already a page for this bridge */
for ( i = max_ports ; i < ARRAY_SIZE ( priv - > vlans ) ; i + + ) {
if ( priv - > vlans [ i ] . bridge = = bridge ) {
idx = i ;
break ;
}
}
/* If this bridge is not programmed yet, add a Active VLAN table
* entry in a free slot and prepare the VLAN mapping table entry .
*/
if ( idx = = - 1 ) {
idx = gswip_vlan_active_create ( priv , bridge , - 1 , 0 ) ;
if ( idx < 0 )
return idx ;
active_vlan_created = true ;
vlan_mapping . index = idx ;
vlan_mapping . table = GSWIP_TABLE_VLAN_MAPPING ;
/* VLAN ID byte, maps to the VLAN ID of vlan active table */
vlan_mapping . val [ 0 ] = 0 ;
} else {
/* Read the existing VLAN mapping entry from the switch */
vlan_mapping . index = idx ;
vlan_mapping . table = GSWIP_TABLE_VLAN_MAPPING ;
err = gswip_pce_table_entry_read ( priv , & vlan_mapping ) ;
if ( err ) {
dev_err ( priv - > dev , " failed to read VLAN mapping: %d \n " ,
err ) ;
return err ;
}
}
/* Update the VLAN mapping entry and write it to the switch */
vlan_mapping . val [ 1 ] | = BIT ( cpu_port ) ;
vlan_mapping . val [ 1 ] | = BIT ( port ) ;
err = gswip_pce_table_entry_write ( priv , & vlan_mapping ) ;
if ( err ) {
dev_err ( priv - > dev , " failed to write VLAN mapping: %d \n " , err ) ;
/* In case an Active VLAN was creaetd delete it again */
if ( active_vlan_created )
gswip_vlan_active_remove ( priv , idx ) ;
return err ;
}
gswip_switch_w ( priv , 0 , GSWIP_PCE_DEFPVID ( port ) ) ;
return 0 ;
}
2019-05-06 01:25:08 +03:00
/* Add @port to VLAN @vid on the VLAN aware bridge @bridge.
 *
 * Looks up (or creates) the Active VLAN table entry for this
 * bridge/VID pair and updates the matching VLAN mapping entry:
 * val[0] holds the VLAN ID, val[1] the member port map and val[2]
 * the tagged port map. The CPU port is always a tagged member. When
 * @pvid is set, the entry index becomes the port's default PVID.
 */
static int gswip_vlan_add_aware(struct gswip_priv *priv,
				struct net_device *bridge, int port,
				u16 vid, bool untagged,
				bool pvid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int fid = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			/* All VLANs of one bridge must share one flow ID */
			if (fid != -1 && fid != priv->vlans[i].fid)
				dev_err(priv->dev, "one bridge with multiple flow ids\n");
			fid = priv->vlans[i].fid;
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add a Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* VLAN ID byte, maps to the VLAN ID of the vlan active table.
	 * Set once here for both the freshly-created and the read-back
	 * entry (the original code redundantly assigned it twice on the
	 * creation path).
	 */
	vlan_mapping.val[0] = vid;

	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[2] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);

	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	if (pvid)
		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));

	return 0;
}
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
/* Remove @port from a bridge VLAN entry.
 *
 * For VLAN aware bridges the entry is matched on bridge and @vid; for
 * VLAN unaware bridges only on the bridge pointer. The port is cleared
 * from both the member and the tagged port maps; when no front-panel
 * port is left in the entry (only the CPU port), the Active VLAN entry
 * is torn down as well. When @pvid is set, the port's default PVID
 * register is reset.
 */
static int gswip_vlan_remove(struct gswip_priv *priv,
			     struct net_device *bridge, int port,
			     u16 vid, bool pvid, bool vlan_aware)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    (!vlan_aware || priv->vlans[i].vid == vid)) {
			idx = i;
			break;
		}
	}

	if (idx == -1) {
		dev_err(priv->dev, "bridge to leave does not exists\n");
		return -ENOENT;
	}

	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
		return err;
	}

	vlan_mapping.val[1] &= ~BIT(port);
	vlan_mapping.val[2] &= ~BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	/* In case all ports are removed from the bridge, remove the VLAN */
	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
		err = gswip_vlan_active_remove(priv, idx);
		if (err) {
			dev_err(priv->dev, "failed to write active VLAN: %d\n",
				err);
			return err;
		}
	}

	/* GSWIP 2.2 (GRX300) and later program here the VID directly. */
	if (pvid)
		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}
static int gswip_port_bridge_join ( struct dsa_switch * ds , int port ,
struct net_device * bridge )
{
struct gswip_priv * priv = ds - > priv ;
int err ;
2019-05-06 01:25:08 +03:00
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges . No bridge is configured here .
*/
if ( ! br_vlan_enabled ( bridge ) ) {
err = gswip_vlan_add_unaware ( priv , bridge , port ) ;
if ( err )
return err ;
priv - > port_vlan_filter & = ~ BIT ( port ) ;
} else {
priv - > port_vlan_filter | = BIT ( port ) ;
}
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
return gswip_add_single_port_br ( priv , port , false ) ;
}
static void gswip_port_bridge_leave ( struct dsa_switch * ds , int port ,
struct net_device * bridge )
{
struct gswip_priv * priv = ds - > priv ;
gswip_add_single_port_br ( priv , port , true ) ;
2019-05-06 01:25:08 +03:00
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges . No bridge is configured here .
*/
if ( ! br_vlan_enabled ( bridge ) )
gswip_vlan_remove ( priv , bridge , port , 0 , true , false ) ;
}
static int gswip_port_vlan_prepare ( struct dsa_switch * ds , int port ,
2021-02-13 23:43:18 +03:00
const struct switchdev_obj_port_vlan * vlan ,
struct netlink_ext_ack * extack )
2019-05-06 01:25:08 +03:00
{
struct gswip_priv * priv = ds - > priv ;
struct net_device * bridge = dsa_to_port ( ds , port ) - > bridge_dev ;
unsigned int max_ports = priv - > hw_info - > max_ports ;
int pos = max_ports ;
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
int i , idx = - 1 ;
2019-05-06 01:25:08 +03:00
/* We only support VLAN filtering on bridges */
if ( ! dsa_is_cpu_port ( ds , port ) & & ! bridge )
return - EOPNOTSUPP ;
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
/* Check if there is already a page for this VLAN */
for ( i = max_ports ; i < ARRAY_SIZE ( priv - > vlans ) ; i + + ) {
if ( priv - > vlans [ i ] . bridge = = bridge & &
priv - > vlans [ i ] . vid = = vlan - > vid ) {
idx = i ;
break ;
}
}
2019-05-06 01:25:08 +03:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
/* If this VLAN is not programmed yet, we have to reserve
* one entry in the VLAN table . Make sure we start at the
* next position round .
*/
if ( idx = = - 1 ) {
/* Look for a free slot */
for ( ; pos < ARRAY_SIZE ( priv - > vlans ) ; pos + + ) {
if ( ! priv - > vlans [ pos ] . bridge ) {
idx = pos ;
pos + + ;
2019-05-06 01:25:08 +03:00
break ;
}
}
2021-02-13 23:43:18 +03:00
if ( idx = = - 1 ) {
NL_SET_ERR_MSG_MOD ( extack , " No slot in VLAN table " ) ;
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
return - ENOSPC ;
2021-02-13 23:43:18 +03:00
}
2019-05-06 01:25:08 +03:00
}
return 0 ;
}
2021-01-09 03:01:53 +03:00
static int gswip_port_vlan_add ( struct dsa_switch * ds , int port ,
2021-02-13 23:43:18 +03:00
const struct switchdev_obj_port_vlan * vlan ,
struct netlink_ext_ack * extack )
2019-05-06 01:25:08 +03:00
{
struct gswip_priv * priv = ds - > priv ;
struct net_device * bridge = dsa_to_port ( ds , port ) - > bridge_dev ;
bool untagged = vlan - > flags & BRIDGE_VLAN_INFO_UNTAGGED ;
bool pvid = vlan - > flags & BRIDGE_VLAN_INFO_PVID ;
2021-01-09 03:01:53 +03:00
int err ;
2021-02-13 23:43:18 +03:00
err = gswip_port_vlan_prepare ( ds , port , vlan , extack ) ;
2021-01-09 03:01:53 +03:00
if ( err )
return err ;
2019-05-06 01:25:08 +03:00
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here . This is also called with bridge
* NULL and then we do not know for which bridge to configure
* this .
*/
if ( dsa_is_cpu_port ( ds , port ) )
2021-01-09 03:01:53 +03:00
return 0 ;
2019-05-06 01:25:08 +03:00
2021-01-09 03:01:53 +03:00
return gswip_vlan_add_aware ( priv , bridge , port , vlan - > vid ,
untagged , pvid ) ;
2019-05-06 01:25:08 +03:00
}
static int gswip_port_vlan_del ( struct dsa_switch * ds , int port ,
const struct switchdev_obj_port_vlan * vlan )
{
struct gswip_priv * priv = ds - > priv ;
struct net_device * bridge = dsa_to_port ( ds , port ) - > bridge_dev ;
bool pvid = vlan - > flags & BRIDGE_VLAN_INFO_PVID ;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here . This is also called with bridge
* NULL and then we do not know for which bridge to configure
* this .
*/
if ( dsa_is_cpu_port ( ds , port ) )
return 0 ;
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
return gswip_vlan_remove ( priv , bridge , port , vlan - > vid , pvid , true ) ;
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
}
2019-05-06 01:25:09 +03:00
static void gswip_port_fast_age ( struct dsa_switch * ds , int port )
{
struct gswip_priv * priv = ds - > priv ;
struct gswip_pce_table_entry mac_bridge = { 0 , } ;
int i ;
int err ;
for ( i = 0 ; i < 2048 ; i + + ) {
mac_bridge . table = GSWIP_TABLE_MAC_BRIDGE ;
mac_bridge . index = i ;
err = gswip_pce_table_entry_read ( priv , & mac_bridge ) ;
if ( err ) {
2019-05-08 13:22:09 +03:00
dev_err ( priv - > dev , " failed to read mac bridge: %d \n " ,
2019-05-06 01:25:09 +03:00
err ) ;
return ;
}
if ( ! mac_bridge . valid )
continue ;
if ( mac_bridge . val [ 1 ] & GSWIP_TABLE_MAC_BRIDGE_STATIC )
continue ;
if ( ( ( mac_bridge . val [ 0 ] & GENMASK ( 7 , 4 ) ) > > 4 ) ! = port )
continue ;
mac_bridge . valid = false ;
err = gswip_pce_table_entry_write ( priv , & mac_bridge ) ;
if ( err ) {
2019-05-08 13:22:09 +03:00
dev_err ( priv - > dev , " failed to write mac bridge: %d \n " ,
2019-05-06 01:25:09 +03:00
err ) ;
return ;
}
}
}
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
/* Apply a bridge STP state to the port.
 *
 * BR_STATE_DISABLED stops the port's SDMA entirely; the other states
 * map onto the PCE port-state field, with the SDMA (re-)enabled.
 */
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
        struct gswip_priv *priv = ds->priv;
        u32 pstate;

        switch (state) {
        case BR_STATE_DISABLED:
                gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
                                  GSWIP_SDMA_PCTRLp(port));
                return;
        case BR_STATE_BLOCKING:
        case BR_STATE_LISTENING:
                pstate = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
                break;
        case BR_STATE_LEARNING:
                pstate = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
                break;
        case BR_STATE_FORWARDING:
                pstate = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
                break;
        default:
                dev_err(priv->dev, "invalid STP state: %d\n", state);
                return;
        }

        gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
                          GSWIP_SDMA_PCTRLp(port));
        gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, pstate,
                          GSWIP_PCE_PCTRL_0p(port));
}
2019-05-06 01:25:10 +03:00
/* Add or remove a static MAC bridge table entry for @addr on @port.
 *
 * The hardware entry is keyed on the MAC address plus the filtering ID
 * (FID) of the bridge the port is a member of.
 * NOTE(review): @vid is not used in the key — presumably per-VLAN FDB
 * entries are not supported here; confirm against the callers.
 */
static int gswip_port_fdb(struct dsa_switch *ds, int port,
                          const unsigned char *addr, u16 vid, bool add)
{
        struct gswip_priv *priv = ds->priv;
        struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
        struct gswip_pce_table_entry entry = {0,};
        unsigned int cpu_port = priv->hw_info->cpu_port;
        int fid = -1;
        int i;
        int err;

        if (!bridge)
                return -EINVAL;

        /* Find the FID assigned to the bridge this port belongs to. */
        for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
                if (priv->vlans[i].bridge == bridge) {
                        fid = priv->vlans[i].fid;
                        break;
                }
        }

        if (fid == -1) {
                dev_err(priv->dev, "Port not part of a bridge\n");
                return -EINVAL;
        }

        entry.table = GSWIP_TABLE_MAC_BRIDGE;
        entry.key_mode = true;
        entry.key[0] = addr[5] | (addr[4] << 8);
        entry.key[1] = addr[3] | (addr[2] << 8);
        entry.key[2] = addr[1] | (addr[0] << 8);
        entry.key[3] = fid;
        entry.val[0] = add ? BIT(port) : 0; /* port map */
        entry.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
        entry.valid = add;

        err = gswip_pce_table_entry_write(priv, &entry);
        if (err)
                dev_err(priv->dev, "failed to write mac bridge: %d\n", err);

        return err;
}
/* DSA .port_fdb_add hook: thin wrapper around gswip_port_fdb(). */
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
                              const unsigned char *addr, u16 vid)
{
        return gswip_port_fdb(ds, port, addr, vid, true);
}
/* DSA .port_fdb_del hook: thin wrapper around gswip_port_fdb(). */
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
                              const unsigned char *addr, u16 vid)
{
        return gswip_port_fdb(ds, port, addr, vid, false);
}
/* Dump the MAC bridge table entries visible on @port through @cb.
 *
 * Static entries carry a port bitmap in val[0]; dynamically learned
 * entries instead store the source port number in bits 7:4 of val[0].
 * Returns 0 on success or a negative error code.
 */
static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
                               dsa_fdb_dump_cb_t *cb, void *data)
{
        struct gswip_priv *priv = ds->priv;
        struct gswip_pce_table_entry mac_bridge = {0,};
        unsigned char addr[6];
        int i;
        int err;

        for (i = 0; i < 2048; i++) {
                mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
                mac_bridge.index = i;

                err = gswip_pce_table_entry_read(priv, &mac_bridge);
                if (err) {
                        /* Fix: this is a table read, not a write. */
                        dev_err(priv->dev, "failed to read mac bridge: %d\n",
                                err);
                        return err;
                }

                if (!mac_bridge.valid)
                        continue;

                /* key[] holds the MAC address in 16-bit words,
                 * least-significant octets first.
                 */
                addr[5] = mac_bridge.key[0] & 0xff;
                addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
                addr[3] = mac_bridge.key[1] & 0xff;
                addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
                addr[1] = mac_bridge.key[2] & 0xff;
                addr[0] = (mac_bridge.key[2] >> 8) & 0xff;

                if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
                        if (mac_bridge.val[0] & BIT(port)) {
                                err = cb(addr, 0, true, data);
                                if (err)
                                        return err;
                        }
                } else {
                        if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
                                err = cb(addr, 0, false, data);
                                if (err)
                                        return err;
                        }
                }
        }

        return 0;
}
2021-03-22 23:37:15 +03:00
static void gswip_phylink_set_capab ( unsigned long * supported ,
struct phylink_link_state * state )
2018-09-09 23:20:39 +03:00
{
__ETHTOOL_DECLARE_LINK_MODE_MASK ( mask ) = { 0 , } ;
2021-03-22 23:37:15 +03:00
/* Allow all the expected bits */
phylink_set ( mask , Autoneg ) ;
phylink_set_port_modes ( mask ) ;
phylink_set ( mask , Pause ) ;
phylink_set ( mask , Asym_Pause ) ;
/* With the exclusion of MII, Reverse MII and Reduced MII, we
* support Gigabit , including Half duplex
*/
if ( state - > interface ! = PHY_INTERFACE_MODE_MII & &
state - > interface ! = PHY_INTERFACE_MODE_REVMII & &
state - > interface ! = PHY_INTERFACE_MODE_RMII ) {
phylink_set ( mask , 1000 baseT_Full ) ;
phylink_set ( mask , 1000 baseT_Half ) ;
}
phylink_set ( mask , 10 baseT_Half ) ;
phylink_set ( mask , 10 baseT_Full ) ;
phylink_set ( mask , 100 baseT_Half ) ;
phylink_set ( mask , 100 baseT_Full ) ;
bitmap_and ( supported , supported , mask ,
__ETHTOOL_LINK_MODE_MASK_NBITS ) ;
bitmap_and ( state - > advertising , state - > advertising , mask ,
__ETHTOOL_LINK_MODE_MASK_NBITS ) ;
}
static void gswip_xrx200_phylink_validate ( struct dsa_switch * ds , int port ,
unsigned long * supported ,
struct phylink_link_state * state )
{
2018-09-09 23:20:39 +03:00
switch ( port ) {
case 0 :
case 1 :
if ( ! phy_interface_mode_is_rgmii ( state - > interface ) & &
state - > interface ! = PHY_INTERFACE_MODE_MII & &
state - > interface ! = PHY_INTERFACE_MODE_REVMII & &
2018-09-15 15:08:48 +03:00
state - > interface ! = PHY_INTERFACE_MODE_RMII )
goto unsupported ;
2018-09-09 23:20:39 +03:00
break ;
case 2 :
case 3 :
case 4 :
2018-09-15 15:08:48 +03:00
if ( state - > interface ! = PHY_INTERFACE_MODE_INTERNAL )
goto unsupported ;
2018-09-09 23:20:39 +03:00
break ;
case 5 :
if ( ! phy_interface_mode_is_rgmii ( state - > interface ) & &
2018-09-15 15:08:48 +03:00
state - > interface ! = PHY_INTERFACE_MODE_INTERNAL )
goto unsupported ;
2018-09-09 23:20:39 +03:00
break ;
2018-09-15 15:08:48 +03:00
default :
bitmap_zero ( supported , __ETHTOOL_LINK_MODE_MASK_NBITS ) ;
dev_err ( ds - > dev , " Unsupported port: %i \n " , port ) ;
return ;
2018-09-09 23:20:39 +03:00
}
2021-03-22 23:37:15 +03:00
gswip_phylink_set_capab ( supported , state ) ;
2018-09-09 23:20:39 +03:00
2021-03-22 23:37:15 +03:00
return ;
unsupported :
bitmap_zero ( supported , __ETHTOOL_LINK_MODE_MASK_NBITS ) ;
dev_err ( ds - > dev , " Unsupported interface '%s' for port %d \n " ,
phy_modes ( state - > interface ) , port ) ;
}
static void gswip_xrx300_phylink_validate ( struct dsa_switch * ds , int port ,
unsigned long * supported ,
struct phylink_link_state * state )
{
switch ( port ) {
case 0 :
if ( ! phy_interface_mode_is_rgmii ( state - > interface ) & &
state - > interface ! = PHY_INTERFACE_MODE_GMII & &
state - > interface ! = PHY_INTERFACE_MODE_RMII )
goto unsupported ;
break ;
case 1 :
case 2 :
case 3 :
case 4 :
if ( state - > interface ! = PHY_INTERFACE_MODE_INTERNAL )
goto unsupported ;
break ;
case 5 :
if ( ! phy_interface_mode_is_rgmii ( state - > interface ) & &
state - > interface ! = PHY_INTERFACE_MODE_INTERNAL & &
state - > interface ! = PHY_INTERFACE_MODE_RMII )
goto unsupported ;
break ;
default :
bitmap_zero ( supported , __ETHTOOL_LINK_MODE_MASK_NBITS ) ;
dev_err ( ds - > dev , " Unsupported port: %i \n " , port ) ;
return ;
2018-09-09 23:20:39 +03:00
}
2021-03-22 23:37:15 +03:00
gswip_phylink_set_capab ( supported , state ) ;
2018-09-09 23:20:39 +03:00
2018-09-15 15:08:48 +03:00
return ;
unsupported :
bitmap_zero ( supported , __ETHTOOL_LINK_MODE_MASK_NBITS ) ;
2020-06-07 16:02:58 +03:00
dev_err ( ds - > dev , " Unsupported interface '%s' for port %d \n " ,
phy_modes ( state - > interface ) , port ) ;
2018-09-09 23:20:39 +03:00
}
2021-04-08 21:38:27 +03:00
/* Force the MDIO link status bit for @port up or down. */
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
{
        u32 mdio_phy = link ? GSWIP_MDIO_PHY_LINK_UP
                            : GSWIP_MDIO_PHY_LINK_DOWN;

        gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
                        GSWIP_MDIO_PHYp(port));
}
/* Program the MDIO speed, MII clock rate and MAC GMII mode for the
 * resolved link @speed.  For RMII the reference clock is always 50 MHz
 * regardless of the link speed.
 * NOTE(review): speeds other than 10/100/1000 fall through with all
 * three values zero and are written as-is — confirm that is intended.
 */
static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
                                 phy_interface_t interface)
{
        u32 mdio_cfg = 0, mii_rate = 0, mac_cfg = 0;

        switch (speed) {
        case SPEED_10:
                mdio_cfg = GSWIP_MDIO_PHY_SPEED_M10;
                mii_rate = interface == PHY_INTERFACE_MODE_RMII ?
                           GSWIP_MII_CFG_RATE_M50 : GSWIP_MII_CFG_RATE_M2P5;
                mac_cfg = GSWIP_MAC_CTRL_0_GMII_MII;
                break;
        case SPEED_100:
                mdio_cfg = GSWIP_MDIO_PHY_SPEED_M100;
                mii_rate = interface == PHY_INTERFACE_MODE_RMII ?
                           GSWIP_MII_CFG_RATE_M50 : GSWIP_MII_CFG_RATE_M25;
                mac_cfg = GSWIP_MAC_CTRL_0_GMII_MII;
                break;
        case SPEED_1000:
                mdio_cfg = GSWIP_MDIO_PHY_SPEED_G1;
                mii_rate = GSWIP_MII_CFG_RATE_M125;
                mac_cfg = GSWIP_MAC_CTRL_0_GMII_RGMII;
                break;
        }

        gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_cfg,
                        GSWIP_MDIO_PHYp(port));
        gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_rate, port);
        gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_cfg,
                          GSWIP_MAC_CTRL_0p(port));
}
static void gswip_port_set_duplex ( struct gswip_priv * priv , int port , int duplex )
{
u32 mac_ctrl_0 , mdio_phy ;
if ( duplex = = DUPLEX_FULL ) {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN ;
mdio_phy = GSWIP_MDIO_PHY_FDUP_EN ;
} else {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS ;
mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS ;
}
gswip_switch_mask ( priv , GSWIP_MAC_CTRL_0_FDUP_MASK , mac_ctrl_0 ,
GSWIP_MAC_CTRL_0p ( port ) ) ;
gswip_mdio_mask ( priv , GSWIP_MDIO_PHY_FDUP_MASK , mdio_phy ,
GSWIP_MDIO_PHYp ( port ) ) ;
}
/* Apply the resolved flow-control settings to the MAC and MDIO block. */
static void gswip_port_set_pause(struct gswip_priv *priv, int port,
                                 bool tx_pause, bool rx_pause)
{
        u32 mac_ctrl_0, mdio_phy;

        if (tx_pause)
                mac_ctrl_0 = rx_pause ? GSWIP_MAC_CTRL_0_FCON_RXTX
                                      : GSWIP_MAC_CTRL_0_FCON_TX;
        else
                mac_ctrl_0 = rx_pause ? GSWIP_MAC_CTRL_0_FCON_RX
                                      : GSWIP_MAC_CTRL_0_FCON_NONE;

        mdio_phy = (tx_pause ? GSWIP_MDIO_PHY_FCONTX_EN
                             : GSWIP_MDIO_PHY_FCONTX_DIS) |
                   (rx_pause ? GSWIP_MDIO_PHY_FCONRX_EN
                             : GSWIP_MDIO_PHY_FCONRX_DIS);

        gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
                          mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
        gswip_mdio_mask(priv,
                        GSWIP_MDIO_PHY_FCONTX_MASK |
                        GSWIP_MDIO_PHY_FCONRX_MASK,
                        mdio_phy, GSWIP_MDIO_PHYp(port));
}
2018-09-09 23:20:39 +03:00
static void gswip_phylink_mac_config ( struct dsa_switch * ds , int port ,
unsigned int mode ,
const struct phylink_link_state * state )
{
struct gswip_priv * priv = ds - > priv ;
u32 miicfg = 0 ;
miicfg | = GSWIP_MII_CFG_LDCLKDIS ;
switch ( state - > interface ) {
case PHY_INTERFACE_MODE_MII :
case PHY_INTERFACE_MODE_INTERNAL :
miicfg | = GSWIP_MII_CFG_MODE_MIIM ;
break ;
case PHY_INTERFACE_MODE_REVMII :
miicfg | = GSWIP_MII_CFG_MODE_MIIP ;
break ;
case PHY_INTERFACE_MODE_RMII :
miicfg | = GSWIP_MII_CFG_MODE_RMIIM ;
2021-04-08 21:38:28 +03:00
/* Configure the RMII clock as output: */
miicfg | = GSWIP_MII_CFG_RMII_CLK ;
2018-09-09 23:20:39 +03:00
break ;
case PHY_INTERFACE_MODE_RGMII :
case PHY_INTERFACE_MODE_RGMII_ID :
case PHY_INTERFACE_MODE_RGMII_RXID :
case PHY_INTERFACE_MODE_RGMII_TXID :
miicfg | = GSWIP_MII_CFG_MODE_RGMII ;
break ;
2021-03-22 23:37:15 +03:00
case PHY_INTERFACE_MODE_GMII :
miicfg | = GSWIP_MII_CFG_MODE_GMII ;
break ;
2018-09-09 23:20:39 +03:00
default :
dev_err ( ds - > dev ,
" Unsupported interface: %d \n " , state - > interface ) ;
return ;
}
2021-04-08 21:38:28 +03:00
gswip_mii_mask_cfg ( priv ,
GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS ,
miicfg , port ) ;
2018-09-09 23:20:39 +03:00
switch ( state - > interface ) {
case PHY_INTERFACE_MODE_RGMII_ID :
gswip_mii_mask_pcdu ( priv , GSWIP_MII_PCDU_TXDLY_MASK |
GSWIP_MII_PCDU_RXDLY_MASK , 0 , port ) ;
break ;
case PHY_INTERFACE_MODE_RGMII_RXID :
gswip_mii_mask_pcdu ( priv , GSWIP_MII_PCDU_RXDLY_MASK , 0 , port ) ;
break ;
case PHY_INTERFACE_MODE_RGMII_TXID :
gswip_mii_mask_pcdu ( priv , GSWIP_MII_PCDU_TXDLY_MASK , 0 , port ) ;
break ;
default :
break ;
}
}
/* phylink mac_link_down callback: stop the MII interface and, on user
 * ports, also force the MDIO link bit down.
 */
static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
                                        unsigned int mode,
                                        phy_interface_t interface)
{
        struct gswip_priv *priv = ds->priv;

        gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);

        if (!dsa_is_cpu_port(ds, port))
                gswip_port_set_link(priv, port, false);
}
/* phylink mac_link_up callback: on user ports, program link, speed,
 * duplex and pause from the resolved link parameters, then enable the
 * MII interface.
 */
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
                                      unsigned int mode,
                                      phy_interface_t interface,
                                      struct phy_device *phydev,
                                      int speed, int duplex,
                                      bool tx_pause, bool rx_pause)
{
        struct gswip_priv *priv = ds->priv;

        if (!dsa_is_cpu_port(ds, port)) {
                gswip_port_set_link(priv, port, true);
                gswip_port_set_speed(priv, port, speed, interface);
                gswip_port_set_duplex(priv, port, duplex);
                gswip_port_set_pause(priv, port, tx_pause, rx_pause);
        }

        gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
/* ethtool get_strings callback: copy the RMON counter names into the
 * fixed-width ETH_GSTRING_LEN slots (strncpy pads the remainder).
 */
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
                              uint8_t *data)
{
        uint8_t *p = data;
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++, p += ETH_GSTRING_LEN)
                strncpy(p, gswip_rmon_cnt[i].name, ETH_GSTRING_LEN);
}
/* Read one 32-bit value from the buffer-manager RAM.
 *
 * Triggers a read of @table at @index and assembles the result from
 * the two 16-bit value registers.  Returns 0 on timeout (the error is
 * logged but not propagated).
 */
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
                                    u32 index)
{
        u32 val;
        int err;

        gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
        gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
                          GSWIP_BM_RAM_CTRL_OPMOD,
                          table | GSWIP_BM_RAM_CTRL_BAS,
                          GSWIP_BM_RAM_CTRL);

        err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
                                     GSWIP_BM_RAM_CTRL_BAS);
        if (err) {
                dev_err(priv->dev, "timeout while reading table: %u, index: %u",
                        table, index);
                return 0;
        }

        val = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
        val |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;

        return val;
}
/* ethtool get_ethtool_stats callback: read every RMON counter for the
 * port.  Counters with size == 2 span two consecutive RAM words, low
 * word first.
 */
static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
                                    uint64_t *data)
{
        struct gswip_priv *priv = ds->priv;
        int i;

        for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
                const struct gswip_rmon_cnt_desc *desc = &gswip_rmon_cnt[i];
                u64 val;

                val = gswip_bcm_ram_entry_read(priv, port, desc->offset);
                if (desc->size == 2) {
                        u64 high = gswip_bcm_ram_entry_read(priv, port,
                                                            desc->offset + 1);
                        val |= high << 32;
                }
                data[i] = val;
        }
}
static int gswip_get_sset_count ( struct dsa_switch * ds , int port , int sset )
{
if ( sset ! = ETH_SS_STATS )
return 0 ;
return ARRAY_SIZE ( gswip_rmon_cnt ) ;
}
2021-03-22 23:37:15 +03:00
static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
2018-09-09 23:20:39 +03:00
. get_tag_protocol = gswip_get_tag_protocol ,
. setup = gswip_setup ,
. port_enable = gswip_port_enable ,
. port_disable = gswip_port_disable ,
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
. port_bridge_join = gswip_port_bridge_join ,
. port_bridge_leave = gswip_port_bridge_leave ,
2019-05-06 01:25:09 +03:00
. port_fast_age = gswip_port_fast_age ,
2019-05-06 01:25:08 +03:00
. port_vlan_filtering = gswip_port_vlan_filtering ,
. port_vlan_add = gswip_port_vlan_add ,
. port_vlan_del = gswip_port_vlan_del ,
net: dsa: lantiq: Add VLAN unaware bridge offloading
This allows to offload bridges with DSA to the switch hardware and do
the packet forwarding in hardware.
This implements generic functions to access the switch hardware tables,
which are used to control many features of the switch.
This patch activates the MAC learning by removing the MAC address table
lock, to prevent uncontrolled forwarding of packets between all the LAN
ports, they are added into individual bridge tables entries with
individual flow ids and the switch will do the MAC learning for each
port separately before they are added to a real bridge.
Each bridge consist of an entry in the active VLAN table and the VLAN
mapping table, table entries with the same index are matching. In the
VLAN unaware mode we configure everything with VLAN ID 0, but we use
different flow IDs, the switch should handle all VLANs as normal payload
and ignore them. When the hardware looks for the port of the destination
MAC address it only takes the entries which have the same flow ID of the
ingress packet.
The bridges are configured with 64 possible entries with these
information:
Table Index, 0...63
VLAN ID, 0...4095: VLAN ID 0 is untagged
flow ID, 0..63: Same flow IDs share entries in MAC learning table
port map, one bit for each port number
tagged port map, one bit for each port number
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-06 01:25:07 +03:00
. port_stp_state_set = gswip_port_stp_state_set ,
2019-05-06 01:25:10 +03:00
. port_fdb_add = gswip_port_fdb_add ,
. port_fdb_del = gswip_port_fdb_del ,
. port_fdb_dump = gswip_port_fdb_dump ,
2021-03-22 23:37:15 +03:00
. phylink_validate = gswip_xrx200_phylink_validate ,
. phylink_mac_config = gswip_phylink_mac_config ,
. phylink_mac_link_down = gswip_phylink_mac_link_down ,
. phylink_mac_link_up = gswip_phylink_mac_link_up ,
. get_strings = gswip_get_strings ,
. get_ethtool_stats = gswip_get_ethtool_stats ,
. get_sset_count = gswip_get_sset_count ,
} ;
/* DSA switch ops for the xRX300/xRX330 family; identical to the
 * xRX200 ops except for the phylink_validate callback.
 */
static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
        .get_tag_protocol       = gswip_get_tag_protocol,
        .setup                  = gswip_setup,
        .port_enable            = gswip_port_enable,
        .port_disable           = gswip_port_disable,
        .port_bridge_join       = gswip_port_bridge_join,
        .port_bridge_leave      = gswip_port_bridge_leave,
        .port_fast_age          = gswip_port_fast_age,
        .port_vlan_filtering    = gswip_port_vlan_filtering,
        .port_vlan_add          = gswip_port_vlan_add,
        .port_vlan_del          = gswip_port_vlan_del,
        .port_stp_state_set     = gswip_port_stp_state_set,
        .port_fdb_add           = gswip_port_fdb_add,
        .port_fdb_del           = gswip_port_fdb_del,
        .port_fdb_dump          = gswip_port_fdb_dump,
        .phylink_validate       = gswip_xrx300_phylink_validate,
        .phylink_mac_config     = gswip_phylink_mac_config,
        .phylink_mac_link_down  = gswip_phylink_mac_link_down,
        .phylink_mac_link_up    = gswip_phylink_mac_link_up,
        .get_strings            = gswip_get_strings,
        .get_ethtool_stats      = gswip_get_ethtool_stats,
        .get_sset_count         = gswip_get_sset_count,
};
/* GPHY firmware file names per SoC revision.  The xRX330 uses the same
 * firmware as the xRX300.
 */
static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
        .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
        .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};

static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
        .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
        .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};

static const struct xway_gphy_match_data xrx300_gphy_data = {
        .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
        .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};

static const struct of_device_id xway_gphy_match[] = {
        { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
        { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
        { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
        { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
        { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
        {},
};
static int gswip_gphy_fw_load ( struct gswip_priv * priv , struct gswip_gphy_fw * gphy_fw )
{
struct device * dev = priv - > dev ;
const struct firmware * fw ;
void * fw_addr ;
dma_addr_t dma_addr ;
dma_addr_t dev_addr ;
size_t size ;
int ret ;
ret = clk_prepare_enable ( gphy_fw - > clk_gate ) ;
if ( ret )
return ret ;
reset_control_assert ( gphy_fw - > reset ) ;
2021-09-12 14:58:07 +03:00
/* The vendor BSP uses a 200ms delay after asserting the reset line.
* Without this some users are observing that the PHY is not coming up
* on the MDIO bus .
*/
msleep ( 200 ) ;
2018-09-09 23:20:39 +03:00
ret = request_firmware ( & fw , gphy_fw - > fw_name , dev ) ;
if ( ret ) {
dev_err ( dev , " failed to load firmware: %s, error: %i \n " ,
gphy_fw - > fw_name , ret ) ;
return ret ;
}
/* GPHY cores need the firmware code in a persistent and contiguous
* memory area with a 16 kB boundary aligned start address .
*/
size = fw - > size + XRX200_GPHY_FW_ALIGN ;
fw_addr = dmam_alloc_coherent ( dev , size , & dma_addr , GFP_KERNEL ) ;
if ( fw_addr ) {
fw_addr = PTR_ALIGN ( fw_addr , XRX200_GPHY_FW_ALIGN ) ;
dev_addr = ALIGN ( dma_addr , XRX200_GPHY_FW_ALIGN ) ;
memcpy ( fw_addr , fw - > data , fw - > size ) ;
} else {
dev_err ( dev , " failed to alloc firmware memory \n " ) ;
release_firmware ( fw ) ;
return - ENOMEM ;
}
release_firmware ( fw ) ;
ret = regmap_write ( priv - > rcu_regmap , gphy_fw - > fw_addr_offset , dev_addr ) ;
if ( ret )
return ret ;
reset_control_deassert ( gphy_fw - > reset ) ;
return ret ;
}
static int gswip_gphy_fw_probe ( struct gswip_priv * priv ,
struct gswip_gphy_fw * gphy_fw ,
struct device_node * gphy_fw_np , int i )
{
struct device * dev = priv - > dev ;
u32 gphy_mode ;
int ret ;
char gphyname [ 10 ] ;
snprintf ( gphyname , sizeof ( gphyname ) , " gphy%d " , i ) ;
gphy_fw - > clk_gate = devm_clk_get ( dev , gphyname ) ;
if ( IS_ERR ( gphy_fw - > clk_gate ) ) {
dev_err ( dev , " Failed to lookup gate clock \n " ) ;
return PTR_ERR ( gphy_fw - > clk_gate ) ;
}
ret = of_property_read_u32 ( gphy_fw_np , " reg " , & gphy_fw - > fw_addr_offset ) ;
if ( ret )
return ret ;
ret = of_property_read_u32 ( gphy_fw_np , " lantiq,gphy-mode " , & gphy_mode ) ;
/* Default to GE mode */
if ( ret )
gphy_mode = GPHY_MODE_GE ;
switch ( gphy_mode ) {
case GPHY_MODE_FE :
gphy_fw - > fw_name = priv - > gphy_fw_name_cfg - > fe_firmware_name ;
break ;
case GPHY_MODE_GE :
gphy_fw - > fw_name = priv - > gphy_fw_name_cfg - > ge_firmware_name ;
break ;
default :
dev_err ( dev , " Unknown GPHY mode %d \n " , gphy_mode ) ;
return - EINVAL ;
}
gphy_fw - > reset = of_reset_control_array_get_exclusive ( gphy_fw_np ) ;
2018-09-15 04:33:38 +03:00
if ( IS_ERR ( gphy_fw - > reset ) ) {
if ( PTR_ERR ( gphy_fw - > reset ) ! = - EPROBE_DEFER )
2018-09-09 23:20:39 +03:00
dev_err ( dev , " Failed to lookup gphy reset \n " ) ;
2018-09-15 04:33:38 +03:00
return PTR_ERR ( gphy_fw - > reset ) ;
2018-09-09 23:20:39 +03:00
}
return gswip_gphy_fw_load ( priv , gphy_fw ) ;
}
static void gswip_gphy_fw_remove ( struct gswip_priv * priv ,
struct gswip_gphy_fw * gphy_fw )
{
int ret ;
/* check if the device was fully probed */
if ( ! gphy_fw - > fw_name )
return ;
ret = regmap_write ( priv - > rcu_regmap , gphy_fw - > fw_addr_offset , 0 ) ;
if ( ret )
dev_err ( priv - > dev , " can not reset GPHY FW pointer " ) ;
clk_disable_unprepare ( gphy_fw - > clk_gate ) ;
reset_control_put ( gphy_fw - > reset ) ;
}
/* Select the firmware name table for this SoC/GSWIP revision, then probe and
 * load the firmware for every available GPHY child node of @gphy_fw_list_np.
 * Returns 0 on success or a negative errno; on failure all GPHY cores that
 * were already set up are torn down again.
 *
 * Fix: leaving for_each_available_child_of_node() early (goto) must drop the
 * reference the iterator holds on the current child node, otherwise that
 * of_node reference is leaked on every probe error.
 */
static int gswip_gphy_fw_list(struct gswip_priv *priv,
			      struct device_node *gphy_fw_list_np, u32 version)
{
	struct device *dev = priv->dev;
	struct device_node *gphy_fw_np;
	const struct of_device_id *match;
	int err;
	int i = 0;

	/* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
	 * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
	 * needs a different GPHY firmware.
	 */
	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
		switch (version) {
		case GSWIP_VERSION_2_0:
			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
			break;
		case GSWIP_VERSION_2_1:
			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
			break;
		default:
			dev_err(dev, "unknown GSWIP version: 0x%x", version);
			return -ENOENT;
		}
	}

	/* Revision-specific compatibles carry their firmware table directly */
	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
	if (match && match->data)
		priv->gphy_fw_name_cfg = match->data;

	if (!priv->gphy_fw_name_cfg) {
		dev_err(dev, "GPHY compatible type not supported");
		return -ENOENT;
	}

	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
	if (!priv->num_gphy_fw)
		return -ENOENT;

	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
							   "lantiq,rcu");
	if (IS_ERR(priv->rcu_regmap))
		return PTR_ERR(priv->rcu_regmap);

	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
					   sizeof(*priv->gphy_fw),
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->gphy_fw)
		return -ENOMEM;

	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
					  gphy_fw_np, i);
		if (err) {
			/* The loop iterator holds a reference on the current
			 * child; it is only dropped on normal iteration, so
			 * put it explicitly when exiting the loop early.
			 */
			of_node_put(gphy_fw_np);
			goto remove_gphy;
		}
		i++;
	}

	/* The standalone PHY11G requires 300ms to be fully
	 * initialized and ready for any MDIO communication after being
	 * taken out of reset. For the SoC-internal GPHY variant there
	 * is no (known) documentation for the minimum time after a
	 * reset. Use the same value as for the standalone variant as
	 * some users have reported internal PHYs not being detected
	 * without any delay.
	 */
	msleep(300);

	return 0;

remove_gphy:
	/* Entries that were never probed are skipped by fw_remove (zeroed) */
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}
static int gswip_probe ( struct platform_device * pdev )
{
struct gswip_priv * priv ;
2021-03-22 23:37:16 +03:00
struct device_node * np , * mdio_np , * gphy_fw_np ;
2018-09-09 23:20:39 +03:00
struct device * dev = & pdev - > dev ;
int err ;
int i ;
u32 version ;
priv = devm_kzalloc ( dev , sizeof ( * priv ) , GFP_KERNEL ) ;
if ( ! priv )
return - ENOMEM ;
2019-08-01 15:25:46 +03:00
priv - > gswip = devm_platform_ioremap_resource ( pdev , 0 ) ;
2018-09-15 04:33:21 +03:00
if ( IS_ERR ( priv - > gswip ) )
return PTR_ERR ( priv - > gswip ) ;
2018-09-09 23:20:39 +03:00
2019-08-01 15:25:46 +03:00
priv - > mdio = devm_platform_ioremap_resource ( pdev , 1 ) ;
2018-09-15 04:33:21 +03:00
if ( IS_ERR ( priv - > mdio ) )
return PTR_ERR ( priv - > mdio ) ;
2018-09-09 23:20:39 +03:00
2019-08-01 15:25:46 +03:00
priv - > mii = devm_platform_ioremap_resource ( pdev , 2 ) ;
2018-09-15 04:33:21 +03:00
if ( IS_ERR ( priv - > mii ) )
return PTR_ERR ( priv - > mii ) ;
2018-09-09 23:20:39 +03:00
priv - > hw_info = of_device_get_match_data ( dev ) ;
if ( ! priv - > hw_info )
return - EINVAL ;
2019-10-21 23:51:30 +03:00
priv - > ds = devm_kzalloc ( dev , sizeof ( * priv - > ds ) , GFP_KERNEL ) ;
2018-09-09 23:20:39 +03:00
if ( ! priv - > ds )
return - ENOMEM ;
2019-10-21 23:51:30 +03:00
priv - > ds - > dev = dev ;
priv - > ds - > num_ports = priv - > hw_info - > max_ports ;
2018-09-09 23:20:39 +03:00
priv - > ds - > priv = priv ;
2021-03-22 23:37:15 +03:00
priv - > ds - > ops = priv - > hw_info - > ops ;
2018-09-09 23:20:39 +03:00
priv - > dev = dev ;
version = gswip_switch_r ( priv , GSWIP_VERSION ) ;
2021-03-22 23:37:16 +03:00
np = dev - > of_node ;
switch ( version ) {
case GSWIP_VERSION_2_0 :
case GSWIP_VERSION_2_1 :
if ( ! of_device_is_compatible ( np , " lantiq,xrx200-gswip " ) )
return - EINVAL ;
break ;
case GSWIP_VERSION_2_2 :
case GSWIP_VERSION_2_2_ETC :
if ( ! of_device_is_compatible ( np , " lantiq,xrx300-gswip " ) & &
! of_device_is_compatible ( np , " lantiq,xrx330-gswip " ) )
return - EINVAL ;
break ;
default :
dev_err ( dev , " unknown GSWIP version: 0x%x " , version ) ;
return - ENOENT ;
}
2018-09-09 23:20:39 +03:00
/* bring up the mdio bus */
2019-01-16 13:23:34 +03:00
gphy_fw_np = of_get_compatible_child ( dev - > of_node , " lantiq,gphy-fw " ) ;
2018-09-09 23:20:39 +03:00
if ( gphy_fw_np ) {
err = gswip_gphy_fw_list ( priv , gphy_fw_np , version ) ;
2019-01-16 13:23:34 +03:00
of_node_put ( gphy_fw_np ) ;
2018-09-09 23:20:39 +03:00
if ( err ) {
dev_err ( dev , " gphy fw probe failed \n " ) ;
return err ;
}
}
/* bring up the mdio bus */
2019-01-16 13:23:34 +03:00
mdio_np = of_get_compatible_child ( dev - > of_node , " lantiq,xrx200-mdio " ) ;
2018-09-09 23:20:39 +03:00
if ( mdio_np ) {
err = gswip_mdio ( priv , mdio_np ) ;
if ( err ) {
dev_err ( dev , " mdio probe failed \n " ) ;
2019-01-16 13:23:34 +03:00
goto put_mdio_node ;
2018-09-09 23:20:39 +03:00
}
}
err = dsa_register_switch ( priv - > ds ) ;
if ( err ) {
dev_err ( dev , " dsa switch register failed: %i \n " , err ) ;
goto mdio_bus ;
}
2018-09-15 15:08:48 +03:00
if ( ! dsa_is_cpu_port ( priv - > ds , priv - > hw_info - > cpu_port ) ) {
2018-09-09 23:20:39 +03:00
dev_err ( dev , " wrong CPU port defined, HW only supports port: %i " ,
priv - > hw_info - > cpu_port ) ;
err = - EINVAL ;
2019-01-16 13:23:33 +03:00
goto disable_switch ;
2018-09-09 23:20:39 +03:00
}
platform_set_drvdata ( pdev , priv ) ;
dev_info ( dev , " probed GSWIP version %lx mod %lx \n " ,
( version & GSWIP_VERSION_REV_MASK ) > > GSWIP_VERSION_REV_SHIFT ,
( version & GSWIP_VERSION_MOD_MASK ) > > GSWIP_VERSION_MOD_SHIFT ) ;
return 0 ;
2019-01-16 13:23:33 +03:00
disable_switch :
gswip_mdio_mask ( priv , GSWIP_MDIO_GLOB_ENABLE , 0 , GSWIP_MDIO_GLOB ) ;
dsa_unregister_switch ( priv - > ds ) ;
2018-09-09 23:20:39 +03:00
mdio_bus :
if ( mdio_np )
mdiobus_unregister ( priv - > ds - > slave_mii_bus ) ;
2019-01-16 13:23:34 +03:00
put_mdio_node :
of_node_put ( mdio_np ) ;
2018-09-09 23:20:39 +03:00
for ( i = 0 ; i < priv - > num_gphy_fw ; i + + )
gswip_gphy_fw_remove ( priv , & priv - > gphy_fw [ i ] ) ;
return err ;
}
static int gswip_remove ( struct platform_device * pdev )
{
struct gswip_priv * priv = platform_get_drvdata ( pdev ) ;
int i ;
net: dsa: be compatible with masters which unregister on shutdown
Lino reports that on his system with bcmgenet as DSA master and KSZ9897
as a switch, rebooting or shutting down never works properly.
What does the bcmgenet driver have special to trigger this, that other
DSA masters do not? It has an implementation of ->shutdown which simply
calls its ->remove implementation. Otherwise said, it unregisters its
network interface on shutdown.
This message can be seen in a loop, and it hangs the reboot process there:
unregister_netdevice: waiting for eth0 to become free. Usage count = 3
So why 3?
A usage count of 1 is normal for a registered network interface, and any
virtual interface which links itself as an upper of that will increment
it via dev_hold. In the case of DSA, this is the call path:
dsa_slave_create
-> netdev_upper_dev_link
-> __netdev_upper_dev_link
-> __netdev_adjacent_dev_insert
-> dev_hold
So a DSA switch with 3 interfaces will result in a usage count elevated
by two, and netdev_wait_allrefs will wait until they have gone away.
Other stacked interfaces, like VLAN, watch NETDEV_UNREGISTER events and
delete themselves, but DSA cannot just vanish and go poof, at most it
can unbind itself from the switch devices, but that must happen strictly
earlier compared to when the DSA master unregisters its net_device, so
reacting on the NETDEV_UNREGISTER event is way too late.
It seems that it is a pretty established pattern to have a driver's
->shutdown hook redirect to its ->remove hook, so the same code is
executed regardless of whether the driver is unbound from the device, or
the system is just shutting down. As Florian puts it, it is quite a big
hammer for bcmgenet to unregister its net_device during shutdown, but
having a common code path with the driver unbind helps ensure it is well
tested.
So DSA, for better or for worse, has to live with that and engage in an
arms race of implementing the ->shutdown hook too, from all individual
drivers, and do something sane when paired with masters that unregister
their net_device there. The only sane thing to do, of course, is to
unlink from the master.
However, complications arise really quickly.
The pattern of redirecting ->shutdown to ->remove is not unique to
bcmgenet or even to net_device drivers. In fact, SPI controllers do it
too (see dspi_shutdown -> dspi_remove), and presumably, I2C controllers
and MDIO controllers do it too (this is something I have not researched
too deeply, but even if this is not the case today, it is certainly
plausible to happen in the future, and must be taken into consideration).
Since DSA switches might be SPI devices, I2C devices, MDIO devices, the
insane implication is that for the exact same DSA switch device, we
might have both ->shutdown and ->remove getting called.
So we need to do something with that insane environment. The pattern
I've come up with is "if this, then not that", so if either ->shutdown
or ->remove gets called, we set the device's drvdata to NULL, and in the
other hook, we check whether the drvdata is NULL and just do nothing.
This is probably not necessary for platform devices, just for devices on
buses, but I would really insist for consistency among drivers, because
when code is copy-pasted, it is not always copy-pasted from the best
sources.
So depending on whether the DSA switch's ->remove or ->shutdown will get
called first, we cannot really guarantee even for the same driver if
rebooting will result in the same code path on all platforms. But
nonetheless, we need to do something minimally reasonable on ->shutdown
too to fix the bug. Of course, the ->remove will do more (a full
teardown of the tree, with all data structures freed, and this is why
the bug was not caught for so long). The new ->shutdown method is kept
separate from dsa_unregister_switch not because we couldn't have
unregistered the switch, but simply in the interest of doing something
quick and to the point.
The big question is: does the DSA switch's ->shutdown get called earlier
than the DSA master's ->shutdown? If not, there is still a risk that we
might still trigger the WARN_ON in unregister_netdevice that says we are
attempting to unregister a net_device which has uppers. That's no good.
Although the reference to the master net_device won't physically go away
even if DSA's ->shutdown comes afterwards, remember we have a dev_hold
on it.
The answer to that question lies in this comment above device_link_add:
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
so the fact that DSA uses device_link_add towards its master is not
exactly for nothing. device_shutdown() walks devices_kset from the back,
so this is our guarantee that DSA's shutdown happens before the master's
shutdown.
Fixes: 2f1e8ea726e9 ("net: dsa: link interfaces with the DSA master to get rid of lockdep warnings")
Link: https://lore.kernel.org/netdev/20210909095324.12978-1-LinoSanfilippo@gmx.de/
Reported-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-09-17 16:34:33 +03:00
if ( ! priv )
return 0 ;
2018-09-09 23:20:39 +03:00
/* disable the switch */
gswip_mdio_mask ( priv , GSWIP_MDIO_GLOB_ENABLE , 0 , GSWIP_MDIO_GLOB ) ;
dsa_unregister_switch ( priv - > ds ) ;
2019-01-16 13:23:34 +03:00
if ( priv - > ds - > slave_mii_bus ) {
2018-09-09 23:20:39 +03:00
mdiobus_unregister ( priv - > ds - > slave_mii_bus ) ;
2019-01-16 13:23:34 +03:00
of_node_put ( priv - > ds - > slave_mii_bus - > dev . of_node ) ;
}
2018-09-09 23:20:39 +03:00
for ( i = 0 ; i < priv - > num_gphy_fw ; i + + )
gswip_gphy_fw_remove ( priv , & priv - > gphy_fw [ i ] ) ;
net: dsa: be compatible with masters which unregister on shutdown
Lino reports that on his system with bcmgenet as DSA master and KSZ9897
as a switch, rebooting or shutting down never works properly.
What does the bcmgenet driver have special to trigger this, that other
DSA masters do not? It has an implementation of ->shutdown which simply
calls its ->remove implementation. Otherwise said, it unregisters its
network interface on shutdown.
This message can be seen in a loop, and it hangs the reboot process there:
unregister_netdevice: waiting for eth0 to become free. Usage count = 3
So why 3?
A usage count of 1 is normal for a registered network interface, and any
virtual interface which links itself as an upper of that will increment
it via dev_hold. In the case of DSA, this is the call path:
dsa_slave_create
-> netdev_upper_dev_link
-> __netdev_upper_dev_link
-> __netdev_adjacent_dev_insert
-> dev_hold
So a DSA switch with 3 interfaces will result in a usage count elevated
by two, and netdev_wait_allrefs will wait until they have gone away.
Other stacked interfaces, like VLAN, watch NETDEV_UNREGISTER events and
delete themselves, but DSA cannot just vanish and go poof, at most it
can unbind itself from the switch devices, but that must happen strictly
earlier compared to when the DSA master unregisters its net_device, so
reacting on the NETDEV_UNREGISTER event is way too late.
It seems that it is a pretty established pattern to have a driver's
->shutdown hook redirect to its ->remove hook, so the same code is
executed regardless of whether the driver is unbound from the device, or
the system is just shutting down. As Florian puts it, it is quite a big
hammer for bcmgenet to unregister its net_device during shutdown, but
having a common code path with the driver unbind helps ensure it is well
tested.
So DSA, for better or for worse, has to live with that and engage in an
arms race of implementing the ->shutdown hook too, from all individual
drivers, and do something sane when paired with masters that unregister
their net_device there. The only sane thing to do, of course, is to
unlink from the master.
However, complications arise really quickly.
The pattern of redirecting ->shutdown to ->remove is not unique to
bcmgenet or even to net_device drivers. In fact, SPI controllers do it
too (see dspi_shutdown -> dspi_remove), and presumably, I2C controllers
and MDIO controllers do it too (this is something I have not researched
too deeply, but even if this is not the case today, it is certainly
plausible to happen in the future, and must be taken into consideration).
Since DSA switches might be SPI devices, I2C devices, MDIO devices, the
insane implication is that for the exact same DSA switch device, we
might have both ->shutdown and ->remove getting called.
So we need to do something with that insane environment. The pattern
I've come up with is "if this, then not that", so if either ->shutdown
or ->remove gets called, we set the device's drvdata to NULL, and in the
other hook, we check whether the drvdata is NULL and just do nothing.
This is probably not necessary for platform devices, just for devices on
buses, but I would really insist for consistency among drivers, because
when code is copy-pasted, it is not always copy-pasted from the best
sources.
So depending on whether the DSA switch's ->remove or ->shutdown will get
called first, we cannot really guarantee even for the same driver if
rebooting will result in the same code path on all platforms. But
nonetheless, we need to do something minimally reasonable on ->shutdown
too to fix the bug. Of course, the ->remove will do more (a full
teardown of the tree, with all data structures freed, and this is why
the bug was not caught for so long). The new ->shutdown method is kept
separate from dsa_unregister_switch not because we couldn't have
unregistered the switch, but simply in the interest of doing something
quick and to the point.
The big question is: does the DSA switch's ->shutdown get called earlier
than the DSA master's ->shutdown? If not, there is still a risk that we
might still trigger the WARN_ON in unregister_netdevice that says we are
attempting to unregister a net_device which has uppers. That's no good.
Although the reference to the master net_device won't physically go away
even if DSA's ->shutdown comes afterwards, remember we have a dev_hold
on it.
The answer to that question lies in this comment above device_link_add:
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
so the fact that DSA uses device_link_add towards its master is not
exactly for nothing. device_shutdown() walks devices_kset from the back,
so this is our guarantee that DSA's shutdown happens before the master's
shutdown.
Fixes: 2f1e8ea726e9 ("net: dsa: link interfaces with the DSA master to get rid of lockdep warnings")
Link: https://lore.kernel.org/netdev/20210909095324.12978-1-LinoSanfilippo@gmx.de/
Reported-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-09-17 16:34:33 +03:00
platform_set_drvdata ( pdev , NULL ) ;
2018-09-09 23:20:39 +03:00
return 0 ;
}
net: dsa: be compatible with masters which unregister on shutdown
Lino reports that on his system with bcmgenet as DSA master and KSZ9897
as a switch, rebooting or shutting down never works properly.
What does the bcmgenet driver have special to trigger this, that other
DSA masters do not? It has an implementation of ->shutdown which simply
calls its ->remove implementation. Otherwise said, it unregisters its
network interface on shutdown.
This message can be seen in a loop, and it hangs the reboot process there:
unregister_netdevice: waiting for eth0 to become free. Usage count = 3
So why 3?
A usage count of 1 is normal for a registered network interface, and any
virtual interface which links itself as an upper of that will increment
it via dev_hold. In the case of DSA, this is the call path:
dsa_slave_create
-> netdev_upper_dev_link
-> __netdev_upper_dev_link
-> __netdev_adjacent_dev_insert
-> dev_hold
So a DSA switch with 3 interfaces will result in a usage count elevated
by two, and netdev_wait_allrefs will wait until they have gone away.
Other stacked interfaces, like VLAN, watch NETDEV_UNREGISTER events and
delete themselves, but DSA cannot just vanish and go poof, at most it
can unbind itself from the switch devices, but that must happen strictly
earlier compared to when the DSA master unregisters its net_device, so
reacting on the NETDEV_UNREGISTER event is way too late.
It seems that it is a pretty established pattern to have a driver's
->shutdown hook redirect to its ->remove hook, so the same code is
executed regardless of whether the driver is unbound from the device, or
the system is just shutting down. As Florian puts it, it is quite a big
hammer for bcmgenet to unregister its net_device during shutdown, but
having a common code path with the driver unbind helps ensure it is well
tested.
So DSA, for better or for worse, has to live with that and engage in an
arms race of implementing the ->shutdown hook too, from all individual
drivers, and do something sane when paired with masters that unregister
their net_device there. The only sane thing to do, of course, is to
unlink from the master.
However, complications arise really quickly.
The pattern of redirecting ->shutdown to ->remove is not unique to
bcmgenet or even to net_device drivers. In fact, SPI controllers do it
too (see dspi_shutdown -> dspi_remove), and presumably, I2C controllers
and MDIO controllers do it too (this is something I have not researched
too deeply, but even if this is not the case today, it is certainly
plausible to happen in the future, and must be taken into consideration).
Since DSA switches might be SPI devices, I2C devices, MDIO devices, the
insane implication is that for the exact same DSA switch device, we
might have both ->shutdown and ->remove getting called.
So we need to do something with that insane environment. The pattern
I've come up with is "if this, then not that", so if either ->shutdown
or ->remove gets called, we set the device's drvdata to NULL, and in the
other hook, we check whether the drvdata is NULL and just do nothing.
This is probably not necessary for platform devices, just for devices on
buses, but I would really insist for consistency among drivers, because
when code is copy-pasted, it is not always copy-pasted from the best
sources.
So depending on whether the DSA switch's ->remove or ->shutdown will get
called first, we cannot really guarantee even for the same driver if
rebooting will result in the same code path on all platforms. But
nonetheless, we need to do something minimally reasonable on ->shutdown
too to fix the bug. Of course, the ->remove will do more (a full
teardown of the tree, with all data structures freed, and this is why
the bug was not caught for so long). The new ->shutdown method is kept
separate from dsa_unregister_switch not because we couldn't have
unregistered the switch, but simply in the interest of doing something
quick and to the point.
The big question is: does the DSA switch's ->shutdown get called earlier
than the DSA master's ->shutdown? If not, there is still a risk that we
might still trigger the WARN_ON in unregister_netdevice that says we are
attempting to unregister a net_device which has uppers. That's no good.
Although the reference to the master net_device won't physically go away
even if DSA's ->shutdown comes afterwards, remember we have a dev_hold
on it.
The answer to that question lies in this comment above device_link_add:
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
so the fact that DSA uses device_link_add towards its master is not
exactly for nothing. device_shutdown() walks devices_kset from the back,
so this is our guarantee that DSA's shutdown happens before the master's
shutdown.
Fixes: 2f1e8ea726e9 ("net: dsa: link interfaces with the DSA master to get rid of lockdep warnings")
Link: https://lore.kernel.org/netdev/20210909095324.12978-1-LinoSanfilippo@gmx.de/
Reported-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-09-17 16:34:33 +03:00
static void gswip_shutdown ( struct platform_device * pdev )
{
struct gswip_priv * priv = platform_get_drvdata ( pdev ) ;
if ( ! priv )
return ;
dsa_switch_shutdown ( priv - > ds ) ;
platform_set_drvdata ( pdev , NULL ) ;
}
2018-09-09 23:20:39 +03:00
static const struct gswip_hw_info gswip_xrx200 = {
. max_ports = 7 ,
. cpu_port = 6 ,
2021-03-22 23:37:15 +03:00
. ops = & gswip_xrx200_switch_ops ,
} ;
/* xRX300/xRX330: same port layout as xRX200 but different switch ops */
static const struct gswip_hw_info gswip_xrx300 = {
	.max_ports	= 7,
	.cpu_port	= 6,
	.ops		= &gswip_xrx300_switch_ops,
};
static const struct of_device_id gswip_of_match [ ] = {
{ . compatible = " lantiq,xrx200-gswip " , . data = & gswip_xrx200 } ,
2021-03-22 23:37:15 +03:00
{ . compatible = " lantiq,xrx300-gswip " , . data = & gswip_xrx300 } ,
{ . compatible = " lantiq,xrx330-gswip " , . data = & gswip_xrx300 } ,
2018-09-09 23:20:39 +03:00
{ } ,
} ;
MODULE_DEVICE_TABLE ( of , gswip_of_match ) ;
static struct platform_driver gswip_driver = {
. probe = gswip_probe ,
. remove = gswip_remove ,
net: dsa: be compatible with masters which unregister on shutdown
Lino reports that on his system with bcmgenet as DSA master and KSZ9897
as a switch, rebooting or shutting down never works properly.
What does the bcmgenet driver have special to trigger this, that other
DSA masters do not? It has an implementation of ->shutdown which simply
calls its ->remove implementation. Otherwise said, it unregisters its
network interface on shutdown.
This message can be seen in a loop, and it hangs the reboot process there:
unregister_netdevice: waiting for eth0 to become free. Usage count = 3
So why 3?
A usage count of 1 is normal for a registered network interface, and any
virtual interface which links itself as an upper of that will increment
it via dev_hold. In the case of DSA, this is the call path:
dsa_slave_create
-> netdev_upper_dev_link
-> __netdev_upper_dev_link
-> __netdev_adjacent_dev_insert
-> dev_hold
So a DSA switch with 3 interfaces will result in a usage count elevated
by two, and netdev_wait_allrefs will wait until they have gone away.
Other stacked interfaces, like VLAN, watch NETDEV_UNREGISTER events and
delete themselves, but DSA cannot just vanish and go poof, at most it
can unbind itself from the switch devices, but that must happen strictly
earlier compared to when the DSA master unregisters its net_device, so
reacting on the NETDEV_UNREGISTER event is way too late.
It seems that it is a pretty established pattern to have a driver's
->shutdown hook redirect to its ->remove hook, so the same code is
executed regardless of whether the driver is unbound from the device, or
the system is just shutting down. As Florian puts it, it is quite a big
hammer for bcmgenet to unregister its net_device during shutdown, but
having a common code path with the driver unbind helps ensure it is well
tested.
So DSA, for better or for worse, has to live with that and engage in an
arms race of implementing the ->shutdown hook too, from all individual
drivers, and do something sane when paired with masters that unregister
their net_device there. The only sane thing to do, of course, is to
unlink from the master.
However, complications arise really quickly.
The pattern of redirecting ->shutdown to ->remove is not unique to
bcmgenet or even to net_device drivers. In fact, SPI controllers do it
too (see dspi_shutdown -> dspi_remove), and presumably, I2C controllers
and MDIO controllers do it too (this is something I have not researched
too deeply, but even if this is not the case today, it is certainly
plausible to happen in the future, and must be taken into consideration).
Since DSA switches might be SPI devices, I2C devices, MDIO devices, the
insane implication is that for the exact same DSA switch device, we
might have both ->shutdown and ->remove getting called.
So we need to do something with that insane environment. The pattern
I've come up with is "if this, then not that", so if either ->shutdown
or ->remove gets called, we set the device's drvdata to NULL, and in the
other hook, we check whether the drvdata is NULL and just do nothing.
This is probably not necessary for platform devices, just for devices on
buses, but I would really insist for consistency among drivers, because
when code is copy-pasted, it is not always copy-pasted from the best
sources.
So depending on whether the DSA switch's ->remove or ->shutdown will get
called first, we cannot really guarantee even for the same driver if
rebooting will result in the same code path on all platforms. But
nonetheless, we need to do something minimally reasonable on ->shutdown
too to fix the bug. Of course, the ->remove will do more (a full
teardown of the tree, with all data structures freed, and this is why
the bug was not caught for so long). The new ->shutdown method is kept
separate from dsa_unregister_switch not because we couldn't have
unregistered the switch, but simply in the interest of doing something
quick and to the point.
The big question is: does the DSA switch's ->shutdown get called earlier
than the DSA master's ->shutdown? If not, there is still a risk that we
might still trigger the WARN_ON in unregister_netdevice that says we are
attempting to unregister a net_device which has uppers. That's no good.
Although the reference to the master net_device won't physically go away
even if DSA's ->shutdown comes afterwards, remember we have a dev_hold
on it.
The answer to that question lies in this comment above device_link_add:
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
so the fact that DSA uses device_link_add towards its master is not
exactly for nothing. device_shutdown() walks devices_kset from the back,
so this is our guarantee that DSA's shutdown happens before the master's
shutdown.
Fixes: 2f1e8ea726e9 ("net: dsa: link interfaces with the DSA master to get rid of lockdep warnings")
Link: https://lore.kernel.org/netdev/20210909095324.12978-1-LinoSanfilippo@gmx.de/
Reported-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-09-17 16:34:33 +03:00
. shutdown = gswip_shutdown ,
2018-09-09 23:20:39 +03:00
. driver = {
. name = " gswip " ,
. of_match_table = gswip_of_match ,
} ,
} ;
module_platform_driver ( gswip_driver ) ;
2019-02-22 22:11:13 +03:00
/* GPHY firmware images loaded by this driver, listed so that userspace
 * tooling (e.g. initramfs generators) can pick them up.
 */
MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");