// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mediatek MT7530 DSA Switch driver
 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
 */
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <net/dsa.h>

#include "mt7530.h"

static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct mt753x_pcs, pcs);
}

/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
	MIB_DESC(1, 0x00, "TxDrop"),
	MIB_DESC(1, 0x04, "TxCrcErr"),
	MIB_DESC(1, 0x08, "TxUnicast"),
	MIB_DESC(1, 0x0c, "TxMulticast"),
	MIB_DESC(1, 0x10, "TxBroadcast"),
	MIB_DESC(1, 0x14, "TxCollision"),
	MIB_DESC(1, 0x18, "TxSingleCollision"),
	MIB_DESC(1, 0x1c, "TxMultipleCollision"),
	MIB_DESC(1, 0x20, "TxDeferred"),
	MIB_DESC(1, 0x24, "TxLateCollision"),
	MIB_DESC(1, 0x28, "TxExcessiveCollision"),
	MIB_DESC(1, 0x2c, "TxPause"),
	MIB_DESC(1, 0x30, "TxPktSz64"),
	MIB_DESC(1, 0x34, "TxPktSz65To127"),
	MIB_DESC(1, 0x38, "TxPktSz128To255"),
	MIB_DESC(1, 0x3c, "TxPktSz256To511"),
	MIB_DESC(1, 0x40, "TxPktSz512To1023"),
	MIB_DESC(1, 0x44, "Tx1024ToMax"),
	MIB_DESC(2, 0x48, "TxBytes"),
	MIB_DESC(1, 0x60, "RxDrop"),
	MIB_DESC(1, 0x64, "RxFiltering"),
	MIB_DESC(1, 0x68, "RxUnicast"),
	MIB_DESC(1, 0x6c, "RxMulticast"),
	MIB_DESC(1, 0x70, "RxBroadcast"),
	MIB_DESC(1, 0x74, "RxAlignErr"),
	MIB_DESC(1, 0x78, "RxCrcErr"),
	MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
	MIB_DESC(1, 0x80, "RxFragErr"),
	MIB_DESC(1, 0x84, "RxOverSzErr"),
	MIB_DESC(1, 0x88, "RxJabberErr"),
	MIB_DESC(1, 0x8c, "RxPause"),
	MIB_DESC(1, 0x90, "RxPktSz64"),
	MIB_DESC(1, 0x94, "RxPktSz65To127"),
	MIB_DESC(1, 0x98, "RxPktSz128To255"),
	MIB_DESC(1, 0x9c, "RxPktSz256To511"),
	MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
	MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
	MIB_DESC(2, 0xa8, "RxBytes"),
	MIB_DESC(1, 0xb0, "RxCtrlDrop"),
	MIB_DESC(1, 0xb4, "RxIngressDrop"),
	MIB_DESC(1, 0xb8, "RxArlDrop"),
};
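
/* Illustrative reading note (editor's addition, not from the original
 * sources): the size field of MIB_DESC() marks how many 4-byte registers
 * a counter spans. For example, MIB_DESC(2, 0x48, "TxBytes") describes a
 * 64-bit counter whose low word sits at offset 0x48 and high word at
 * 0x4c; see mt7530_get_ethtool_stats() below for how the halves merge.
 */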

/* Since phy_device has not yet been created and
 * phy_{read,write}_mmd_indirect is not available, we provide our own
 * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers
 * to complete this function.
 */
static int
core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
{
	struct mii_bus *bus = priv->bus;
	int value, ret;

	/* Write the desired MMD Devad */
	ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
	if (ret < 0)
		goto err;

	/* Write the desired MMD register address */
	ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
	if (ret < 0)
		goto err;

	/* Select the Function: DATA with no post increment */
	ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
	if (ret < 0)
		goto err;

	/* Read the content of the MMD's selected register */
	value = bus->read(bus, 0, MII_MMD_DATA);

	return value;
err:
	dev_err(&bus->dev, "failed to read mmd register\n");

	return ret;
}

static int
core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
			int devad, u32 data)
{
	struct mii_bus *bus = priv->bus;
	int ret;

	/* Write the desired MMD Devad */
	ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
	if (ret < 0)
		goto err;

	/* Write the desired MMD register address */
	ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
	if (ret < 0)
		goto err;

	/* Select the Function: DATA with no post increment */
	ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
	if (ret < 0)
		goto err;

	/* Write the data into MMD's selected register */
	ret = bus->write(bus, 0, MII_MMD_DATA, data);
err:
	if (ret < 0)
		dev_err(&bus->dev,
			"failed to write mmd register\n");
	return ret;
}

static void
mt7530_mutex_lock(struct mt7530_priv *priv)
{
	if (priv->bus)
		mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
}

static void
mt7530_mutex_unlock(struct mt7530_priv *priv)
{
	if (priv->bus)
		mutex_unlock(&priv->bus->mdio_lock);
}

static void
core_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_mutex_lock(priv);

	core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);

	mt7530_mutex_unlock(priv);
}

static void
core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
	u32 val;

	mt7530_mutex_lock(priv);

	val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
	val &= ~mask;
	val |= set;
	core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);

	mt7530_mutex_unlock(priv);
}

static void
core_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
	core_rmw(priv, reg, 0, val);
}

static void
core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
{
	core_rmw(priv, reg, val, 0);
}
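
/* A minimal usage sketch (illustrative, mirroring calls made later in
 * this file): core_set() and core_clear() are thin wrappers over
 * core_rmw(), which serializes the indirect MMD access on the MDIO bus
 * lock. The PLL setup below, for example, gates and ungates the core
 * clock with:
 *
 *	core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 *	...
 *	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 */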

static int
mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
	int ret;

	ret = regmap_write(priv->regmap, reg, val);

	if (ret < 0)
		dev_err(priv->dev,
			"failed to write mt7530 register\n");

	return ret;
}

static u32
mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
{
	int ret;
	u32 val;

	ret = regmap_read(priv->regmap, reg, &val);
	if (ret) {
		WARN_ON_ONCE(1);
		dev_err(priv->dev,
			"failed to read mt7530 register\n");
		return 0;
	}

	return val;
}

static void
mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_mutex_lock(priv);

	mt7530_mii_write(priv, reg, val);

	mt7530_mutex_unlock(priv);
}

static u32
_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
{
	return mt7530_mii_read(p->priv, p->reg);
}

static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
	u32 val;

	mt7530_mutex_lock(p->priv);

	val = mt7530_mii_read(p->priv, p->reg);

	mt7530_mutex_unlock(p->priv);

	return val;
}

static u32
mt7530_read(struct mt7530_priv *priv, u32 reg)
{
	struct mt7530_dummy_poll p;

	INIT_MT7530_DUMMY_POLL(&p, priv, reg);

	return _mt7530_read(&p);
}

static void
mt7530_rmw(struct mt7530_priv *priv, u32 reg,
	   u32 mask, u32 set)
{
	mt7530_mutex_lock(priv);

	regmap_update_bits(priv->regmap, reg, mask, set);

	mt7530_mutex_unlock(priv);
}

static void
mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_rmw(priv, reg, val, val);
}

static void
mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_rmw(priv, reg, val, 0);
}

static int
mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
{
	u32 val;
	int ret;
	struct mt7530_dummy_poll p;

	/* Set the command operating upon the MAC address entries */
	val = ATC_BUSY | ATC_MAT(0) | cmd;
	mt7530_write(priv, MT7530_ATC, val);

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
	ret = readx_poll_timeout(_mt7530_read, &p, val,
				 !(val & ATC_BUSY), 20, 20000);
	if (ret < 0) {
		dev_err(priv->dev, "reset timeout\n");
		return ret;
	}

	/* Additional sanity check for the read command: fail if the
	 * specified entry is invalid.
	 */
	val = mt7530_read(priv, MT7530_ATC);
	if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
		return -EINVAL;

	if (rsp)
		*rsp = val;

	return 0;
}

static void
mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
{
	u32 reg[3];
	int i;

	/* Read from ARL table into an array */
	for (i = 0; i < 3; i++) {
		reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));

		dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
			__func__, __LINE__, i, reg[i]);
	}

	fdb->vid = (reg[1] >> CVID) & CVID_MASK;
	fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
	fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
	fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
	fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
	fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
	fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
	fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
	fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
	fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
}

static void
mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
		 u8 port_mask, const u8 *mac,
		 u8 aging, u8 type)
{
	u32 reg[3] = { 0 };
	int i;

	reg[1] |= vid & CVID_MASK;
	reg[1] |= ATA2_IVL;
	reg[1] |= ATA2_FID(FID_BRIDGED);
	reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
	reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
	/* STATIC_ENT indicates a static entry that won't be aged out;
	 * STATIC_EMP marks the entry for erasure.
	 */
	reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
	reg[1] |= mac[5] << MAC_BYTE_5;
	reg[1] |= mac[4] << MAC_BYTE_4;
	reg[0] |= mac[3] << MAC_BYTE_3;
	reg[0] |= mac[2] << MAC_BYTE_2;
	reg[0] |= mac[1] << MAC_BYTE_1;
	reg[0] |= mac[0] << MAC_BYTE_0;

	/* Write array into the ARL table */
	for (i = 0; i < 3; i++)
		mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
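
/* A minimal sketch of how the two ARL helpers compose (assumes the
 * MT7530_FDB_WRITE command and STATIC_ENT status from mt7530.h): a
 * static unicast entry for port 0 is staged into ATA1..ATA3 and then
 * committed through the ATC command register, roughly:
 *
 *	mt7530_fdb_write(priv, vid, BIT(0), addr, -1, STATIC_ENT);
 *	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
 */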

/* Set up switch core clock for MT7530 */
static void mt7530_pll_setup(struct mt7530_priv *priv)
{
	/* Disable core clock */
	core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Disable PLL */
	core_write(priv, CORE_GSWPLL_GRP1, 0);

	/* Set core clock into 500MHz */
	core_write(priv, CORE_GSWPLL_GRP2,
		   RG_GSWPLL_POSDIV_500M(1) |
		   RG_GSWPLL_FBKDIV_500M(25));

	/* Enable PLL */
	core_write(priv, CORE_GSWPLL_GRP1,
		   RG_GSWPLL_EN_PRE |
		   RG_GSWPLL_POSDIV_200M(2) |
		   RG_GSWPLL_FBKDIV_200M(32));

	udelay(20);

	/* Enable core clock */
	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}

/* If port 6 is available as a CPU port, always prefer that as the default,
 * otherwise don't care.
 */
static struct dsa_port *
mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, 6);

	if (dsa_port_is_cpu(cpu_dp))
		return cpu_dp;

	return NULL;
}

/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
	struct mt7530_priv *priv = ds->priv;
	u32 ncpo1, ssc_delta, trgint, xtal;

	xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;

	if (xtal == HWTRAP_XTAL_20MHZ) {
		dev_err(priv->dev,
			"%s: MT7530 with a 20MHz XTAL is not supported!\n",
			__func__);
		return -EINVAL;
	}

	switch (interface) {
	case PHY_INTERFACE_MODE_RGMII:
		trgint = 0;
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		trgint = 1;

		if (xtal == HWTRAP_XTAL_25MHZ)
			ssc_delta = 0x57;
		else
			ssc_delta = 0x87;

		if (priv->id == ID_MT7621) {
			/* PLL frequency: 125MHz: 1.0GBit */
			if (xtal == HWTRAP_XTAL_40MHZ)
				ncpo1 = 0x0640;
			if (xtal == HWTRAP_XTAL_25MHZ)
				ncpo1 = 0x0a00;
		} else { /* PLL frequency: 250MHz: 2.0Gbit */
			if (xtal == HWTRAP_XTAL_40MHZ)
				ncpo1 = 0x0c80;
			if (xtal == HWTRAP_XTAL_25MHZ)
				ncpo1 = 0x1400;
		}
		break;
	default:
		dev_err(priv->dev, "xMII interface %d not supported\n",
			interface);
		return -EINVAL;
	}

	mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
		   P6_INTF_MODE(trgint));

	if (trgint) {
		/* Disable the MT7530 TRGMII clocks */
		core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);

		/* Setup the MT7530 TRGMII Tx Clock */
		core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
		core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
		core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
		core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
		core_write(priv, CORE_PLL_GROUP4,
			   RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
			   RG_SYSPLL_BIAS_LPF_EN);
		core_write(priv, CORE_PLL_GROUP2,
			   RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			   RG_SYSPLL_POSDIV(1));
		core_write(priv, CORE_PLL_GROUP7,
			   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
			   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

		/* Enable the MT7530 TRGMII clocks */
		core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
	}

	return 0;
}

static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
{
	u32 val;

	val = mt7530_read(priv, MT7531_TOP_SIG_SR);

	return (val & PAD_DUAL_SGMII_EN) != 0;
}

static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
	return 0;
}

static void
mt7531_pll_setup(struct mt7530_priv *priv)
{
	u32 top_sig;
	u32 hwstrap;
	u32 xtal;
	u32 val;

	if (mt7531_dual_sgmii_supported(priv))
		return;

	val = mt7530_read(priv, MT7531_CREV);
	top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
	hwstrap = mt7530_read(priv, MT7531_HWTRAP);
	if ((val & CHIP_REV_M) > 0)
		xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
						    HWTRAP_XTAL_FSEL_25MHZ;
	else
		xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;

	/* Step 1: Disable MT7531 COREPLL */
	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val &= ~EN_COREPLL;
	mt7530_write(priv, MT7531_PLLGP_EN, val);

	/* Step 2: switch to XTAL output */
	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val |= SW_CLKSW;
	mt7530_write(priv, MT7531_PLLGP_EN, val);

	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val &= ~RG_COREPLL_EN;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);

	/* Step 3: disable PLLGP and enable program PLLGP */
	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val |= SW_PLLGP;
	mt7530_write(priv, MT7531_PLLGP_EN, val);

	/* Step 4: program COREPLL output frequency to 500MHz */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val &= ~RG_COREPLL_POSDIV_M;
	val |= 2 << RG_COREPLL_POSDIV_S;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);
	usleep_range(25, 35);

	switch (xtal) {
	case HWTRAP_XTAL_FSEL_25MHZ:
		val = mt7530_read(priv, MT7531_PLLGP_CR0);
		val &= ~RG_COREPLL_SDM_PCW_M;
		val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
		mt7530_write(priv, MT7531_PLLGP_CR0, val);
		break;
	case HWTRAP_XTAL_FSEL_40MHZ:
		val = mt7530_read(priv, MT7531_PLLGP_CR0);
		val &= ~RG_COREPLL_SDM_PCW_M;
		val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
		mt7530_write(priv, MT7531_PLLGP_CR0, val);
		break;
	}

	/* Set feedback divide ratio update signal to high */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val |= RG_COREPLL_SDM_PCW_CHG;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);
	/* Wait for at least 16 XTAL clocks */
	usleep_range(10, 20);

	/* Step 5: set feedback divide ratio update signal to low */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val &= ~RG_COREPLL_SDM_PCW_CHG;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);

	/* Enable 325M clock for SGMII */
	mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* Enable 250SSC clock for RGMII */
	mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val |= RG_COREPLL_EN;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);

	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val |= EN_COREPLL;
	mt7530_write(priv, MT7531_PLLGP_EN, val);
	usleep_range(25, 35);
}

static void
mt7530_mib_reset(struct dsa_switch *ds)
{
	struct mt7530_priv *priv = ds->priv;

	mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
	mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}

static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
{
	return mdiobus_read_nested(priv->bus, port, regnum);
}

static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
				u16 val)
{
	return mdiobus_write_nested(priv->bus, port, regnum, val);
}

static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
			       int devad, int regnum)
{
	return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
}

static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
				int regnum, u16 val)
{
	return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
}

static int
mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
			int regnum)
{
	struct mt7530_dummy_poll p;
	u32 reg, val;
	int ret;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad) | regnum;
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad);
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
	mt7530_mutex_unlock(priv);

	return ret;
}

static int
mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
			 int regnum, u16 data)
{
	struct mt7530_dummy_poll p;
	u32 val, reg;
	int ret;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad) | regnum;
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad) | data;
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

out:
	mt7530_mutex_unlock(priv);

	return ret;
}

static int
mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
{
	struct mt7530_dummy_poll p;
	int ret;
	u32 val;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_REG_ADDR(regnum);

	mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
	mt7530_mutex_unlock(priv);

	return ret;
}

static int
mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
			 u16 data)
{
	struct mt7530_dummy_poll p;
	int ret;
	u32 reg;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
				 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_REG_ADDR(regnum) | data;

	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
				 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

out:
	mt7530_mutex_unlock(priv);

	return ret;
}

static int
mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_read_c22(priv, port, regnum);
}

static int
mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_read_c45(priv, port, devad, regnum);
}

static int
mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_write_c22(priv, port, regnum, val);
}

static int
mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
		     u16 val)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_write_c45(priv, port, devad, regnum, val);
}
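
/* Sketch of how these accessors are wired up (an assumption based on the
 * usual DSA slave MDIO bus registration pattern; the actual hook-up lives
 * elsewhere in this driver):
 *
 *	bus->priv = priv;
 *	bus->read = mt753x_phy_read_c22;
 *	bus->write = mt753x_phy_write_c22;
 *	bus->read_c45 = mt753x_phy_read_c45;
 *	bus->write_c45 = mt753x_phy_write_c45;
 */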

static void
mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		   uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
		strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
			ETH_GSTRING_LEN);
}

static void
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
			 uint64_t *data)
{
	struct mt7530_priv *priv = ds->priv;
	const struct mt7530_mib_desc *mib;
	u32 reg, i;
	u64 hi;

	for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
		mib = &mt7530_mib[i];
		reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;

		data[i] = mt7530_read(priv, reg);
		if (mib->size == 2) {
			hi = mt7530_read(priv, reg + 4);
			data[i] |= hi << 32;
		}
	}
}
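
/* Worked example (illustrative): for the two-word "TxBytes" counter at
 * offset 0x48, the low 32 bits are read at
 * MT7530_PORT_MIB_COUNTER(port) + 0x48 and the high 32 bits at the next
 * register (+ 4), giving data[i] = lo | (hi << 32).
 */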

static int
mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(mt7530_mib);
}

static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct mt7530_priv *priv = ds->priv;
	unsigned int secs = msecs / 1000;
	unsigned int tmp_age_count;
	unsigned int error = -1;
	unsigned int age_count;
	unsigned int age_unit;

	/* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */
	if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1))
		return -ERANGE;

	/* iterate through all possible age_count to find the closest pair */
	for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) {
		unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1;

		if (tmp_age_unit <= AGE_UNIT_MAX) {
			unsigned int tmp_error = secs -
				(tmp_age_count + 1) * (tmp_age_unit + 1);

			/* found a closer pair */
			if (error > tmp_error) {
				error = tmp_error;
				age_count = tmp_age_count;
				age_unit = tmp_age_unit;
			}

			/* found the exact match, so break the loop */
			if (!error)
				break;
		}
	}

	mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit));

	return 0;
}
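
/* Worked example (illustrative): msecs = 300000 gives secs = 300. The
 * first candidate, tmp_age_count = 0, yields tmp_age_unit = 299 with zero
 * error, so the loop exits immediately and the applied timer is
 * (0 + 1) * (299 + 1) = 300 seconds (assuming 299 fits within
 * AGE_UNIT_MAX from mt7530.h).
 */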

static const char *p5_intf_modes(unsigned int p5_interface)
{
	switch (p5_interface) {
	case P5_DISABLED:
		return "DISABLED";
	case P5_INTF_SEL_PHY_P0:
		return "PHY P0";
	case P5_INTF_SEL_PHY_P4:
		return "PHY P4";
	case P5_INTF_SEL_GMAC5:
		return "GMAC5";
	case P5_INTF_SEL_GMAC5_SGMII:
		return "GMAC5_SGMII";
	default:
		return "unknown";
	}
}

static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
{
	struct mt7530_priv *priv = ds->priv;
	u8 tx_delay = 0;
	int val;

	mutex_lock(&priv->reg_mutex);

	val = mt7530_read(priv, MT7530_MHWTRAP);

	val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
	val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;

	switch (priv->p5_intf_sel) {
	case P5_INTF_SEL_PHY_P0:
		/* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
		val |= MHWTRAP_PHY0_SEL;
		fallthrough;
	case P5_INTF_SEL_PHY_P4:
		/* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
		val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;

		/* Setup the MAC by default for the cpu port */
		mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
		break;
	case P5_INTF_SEL_GMAC5:
		/* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
		val &= ~MHWTRAP_P5_DIS;
		break;
	case P5_DISABLED:
		interface = PHY_INTERFACE_MODE_NA;
		break;
	default:
		dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
			priv->p5_intf_sel);
		goto unlock_exit;
	}

	/* Setup RGMII settings */
	if (phy_interface_mode_is_rgmii(interface)) {
		val |= MHWTRAP_P5_RGMII_MODE;

		/* P5 RGMII RX Clock Control: delay setting for 1000M */
		mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);

		/* Don't set delay in DSA mode */
		if (!dsa_is_dsa_port(priv->ds, 5) &&
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID ||
		     interface == PHY_INTERFACE_MODE_RGMII_ID))
			tx_delay = 4; /* n * 0.5 ns */

		/* P5 RGMII TX Clock Control: delay x */
		mt7530_write(priv, MT7530_P5RGMIITXCR,
			     CSR_RGMII_TXC_CFG(0x10 + tx_delay));

		/* reduce P5 RGMII Tx driving, 8mA */
		mt7530_write(priv, MT7530_IO_DRV_CR,
			     P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
	}

	mt7530_write(priv, MT7530_MHWTRAP, val);

	dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
		val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));

	priv->p5_interface = interface;

unlock_exit:
	mutex_unlock(&priv->reg_mutex);
}
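
/* Worked example (illustrative): for a non-DSA port 5 in rgmii-id or
 * rgmii-txid mode, tx_delay above becomes 4, i.e. 4 * 0.5 ns = 2 ns of
 * TX clock delay, programmed as CSR_RGMII_TXC_CFG(0x10 + 4).
 */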

static void
mt753x_trap_frames(struct mt7530_priv *priv)
{
	/* Trap BPDUs to the CPU port(s) */
	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
		   MT753X_BPDU_CPU_ONLY);

	/* Trap 802.1X PAE frames to the CPU port(s) */
	mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
		   MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));

	/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
	mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
		   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}

static int
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;
	int ret;

	/* Setup max capability of CPU port at first */
	if (priv->info->cpu_port_config) {
		ret = priv->info->cpu_port_config(ds, port);
		if (ret)
			return ret;
	}

	/* Enable Mediatek header mode on the cpu port */
	mt7530_write(priv, MT7530_PVC_P(port),
		     PORT_SPEC_TAG);

	/* Enable flooding on the CPU port */
	mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
		   UNU_FFP(BIT(port)));

	/* Set CPU port number */
	if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
		mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));

	/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
	 * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
	 * is affine to the inbound user port.
	 */
	if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
		mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));

	/* CPU port gets connected to all user ports of
	 * the switch.
	 */
	mt7530_write(priv, MT7530_PCR_P(port),
		     PCR_MATRIX(dsa_user_ports(priv->ds)));

	/* Set to fallback mode for independent VLAN learning */
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
		   MT7530_PORT_FALLBACK_MODE);

	return 0;
}

static int
mt7530_port_enable(struct dsa_switch *ds, int port,
		   struct phy_device *phy)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	/* Allow the user port to connect to the CPU port and also restore
	 * the port matrix if the port is a member of a certain bridge.
	 */
	if (dsa_port_is_user(dp)) {
		struct dsa_port *cpu_dp = dp->cpu_dp;

		priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
	}
	priv->ports[port].enable = true;
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
		   priv->ports[port].pm);

	mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);

	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static void
mt7530_port_disable(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	/* Clear the port matrix; it can be restored on the next enablement
	 * of the port.
	 */
	priv->ports[port].enable = false;
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
		   PCR_MATRIX_CLR);

	mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);

	mutex_unlock(&priv->reg_mutex);
}

static int
mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct mt7530_priv *priv = ds->priv;
	int length;
	u32 val;

	/* When a new MTU is set, DSA always sets the CPU port's MTU to the
	 * largest MTU of the slave ports. Because the switch only has a
	 * global RX length register, only allowing the CPU port here is
	 * enough.
	 */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	mt7530_mutex_lock(priv);

	val = mt7530_mii_read(priv, MT7530_GMACCR);
	val &= ~MAX_RX_PKT_LEN_MASK;

	/* RX length also includes Ethernet header, MTK tag, and FCS length */
	length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
	if (length <= 1522) {
		val |= MAX_RX_PKT_LEN_1522;
	} else if (length <= 1536) {
		val |= MAX_RX_PKT_LEN_1536;
	} else if (length <= 1552) {
		val |= MAX_RX_PKT_LEN_1552;
	} else {
		val &= ~MAX_RX_JUMBO_MASK;
		val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
		val |= MAX_RX_PKT_LEN_JUMBO;
	}

	mt7530_mii_write(priv, MT7530_GMACCR, val);

	mt7530_mutex_unlock(priv);

	return 0;
}
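
/* Worked example (illustrative, assuming MTK_HDR_LEN is the 4-byte
 * MediaTek DSA tag): new_mtu = 1500 gives
 * length = 1500 + ETH_HLEN (14) + 4 + ETH_FCS_LEN (4) = 1522, so the
 * standard MAX_RX_PKT_LEN_1522 setting is chosen and no jumbo
 * configuration is needed.
 */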

static int
mt7530_port_max_mtu(struct dsa_switch *ds, int port)
{
	return MT7530_MAX_MTU;
}

static void
mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct mt7530_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = MT7530_STP_DISABLED;
		break;
	case BR_STATE_BLOCKING:
		stp_state = MT7530_STP_BLOCKING;
		break;
	case BR_STATE_LISTENING:
		stp_state = MT7530_STP_LISTENING;
		break;
	case BR_STATE_LEARNING:
		stp_state = MT7530_STP_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = MT7530_STP_FORWARDING;
		break;
	}

	mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED),
		   FID_PST(FID_BRIDGED, stp_state));
}

static int
mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
			     struct switchdev_brport_flags flags,
			     struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static int
mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
			 struct switchdev_brport_flags flags,
			 struct netlink_ext_ack *extack)
{
	struct mt7530_priv *priv = ds->priv;

	if (flags.mask & BR_LEARNING)
		mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS,
			   flags.val & BR_LEARNING ? 0 : SA_DIS);

	if (flags.mask & BR_FLOOD)
		mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
			   flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);

	if (flags.mask & BR_MCAST_FLOOD)
		mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
			   flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);

	if (flags.mask & BR_BCAST_FLOOD)
		mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
			   flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);

	return 0;
}
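
/* Illustrative mapping (an assumption about the user-space side): a
 * command such as "bridge link set dev swp0 flood off" arrives here as
 * flags.mask = BR_FLOOD with BR_FLOOD cleared in flags.val, which clears
 * this port's bit in the UNU_FFP (unknown-unicast flood) field above.
 */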

static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
			struct dsa_bridge bridge, bool *tx_fwd_offload,
			struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
	struct dsa_port *cpu_dp = dp->cpu_dp;
	u32 port_bitmap = BIT(cpu_dp->index);
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	dsa_switch_for_each_user_port(other_dp, ds) {
		int other_port = other_dp->index;

		if (dp == other_dp)
			continue;

		/* Add this port to the port matrix of the other ports in the
		 * same bridge. If the port is disabled, the port matrix is
		 * kept and not set up until the port becomes enabled.
		 */
		if (!dsa_port_offloads_bridge(other_dp, &bridge))
			continue;

		if (priv->ports[other_port].enable)
			mt7530_set(priv, MT7530_PCR_P(other_port),
				   PCR_MATRIX(BIT(port)));
		priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));

		port_bitmap |= BIT(other_port);
	}

	/* Add all other ports to this port's matrix. */
	if (priv->ports[port].enable)
		mt7530_rmw(priv, MT7530_PCR_P(port),
			   PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
	priv->ports[port].pm |= PCR_MATRIX(port_bitmap);

	/* Set to fallback mode for independent VLAN learning */
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
		   MT7530_PORT_FALLBACK_MODE);

	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static void
mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;
	bool all_user_ports_removed = true;
	int i;

	/* This is called after .port_bridge_leave when leaving a VLAN-aware
	 * bridge. Don't set standalone ports to fallback mode.
	 */
	if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
			   MT7530_PORT_FALLBACK_MODE);

	mt7530_rmw(priv, MT7530_PVC_P(port),
		   VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK,
		   VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
		   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) |
		   MT7530_VLAN_ACC_ALL);

	/* Set PVID to 0 */
	mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
		   G0_PORT_VID_DEF);

	for (i = 0; i < MT7530_NUM_PORTS; i++) {
		if (dsa_is_user_port(ds, i) &&
		    dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
			all_user_ports_removed = false;
			break;
		}
	}

	/* CPU port also does the same thing until all user ports belonging to
	 * the CPU port get out of VLAN filtering mode.
	 */
	if (all_user_ports_removed) {
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct dsa_port *cpu_dp = dp->cpu_dp;

		mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
			     PCR_MATRIX(dsa_user_ports(priv->ds)));

		mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
			     | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
	}
}

static void
mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;

	/* Putting the port into security mode allows packet forwarding
	 * through the VLAN table lookup.
	 */
	if (dsa_is_user_port(ds, port)) {
		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
			   MT7530_PORT_SECURITY_MODE);
		mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
			   G0_PORT_VID(priv->ports[port].pvid));

		/* Only accept tagged frames if PVID is not set */
		if (!priv->ports[port].pvid)
			mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
				   MT7530_VLAN_ACC_TAGGED);

		/* Set the port as a user port so that the VID of incoming
		 * packets is recognized before an entry is fetched from the
		 * VLAN table.
		 */
		mt7530_rmw(priv, MT7530_PVC_P(port),
			   VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
			   VLAN_ATTR(MT7530_VLAN_USER) |
			   PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
	} else {
		/* Also set CPU ports to the "user" VLAN port attribute, to
		 * allow VLAN classification, but keep the EG_TAG attribute as
		 * "consistent" (i.o.w. don't change its value) for packets
		 * received by the switch from the CPU, so that tagged packets
		 * are forwarded to user ports as tagged, and untagged as
		 * untagged.
		 */
		mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
			   VLAN_ATTR(MT7530_VLAN_USER));
	}
}

static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
			 struct dsa_bridge bridge)
{
	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	dsa_switch_for_each_user_port(other_dp, ds) {
		int other_port = other_dp->index;

		if (dp == other_dp)
			continue;

		/* Remove this port from the port matrix of the other ports
		 * in the same bridge. If the port is disabled, the port
		 * matrix is kept and not applied until the port becomes
		 * enabled.
		 */
		if (!dsa_port_offloads_bridge(other_dp, &bridge))
			continue;

		if (priv->ports[other_port].enable)
			mt7530_clear(priv, MT7530_PCR_P(other_port),
				     PCR_MATRIX(BIT(port)));
		priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
	}

	/* Set the CPU port to be the only one in the port matrix of
	 * this port.
	 */
	if (priv->ports[port].enable)
		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
			   PCR_MATRIX(BIT(cpu_dp->index)));
	priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));

	/* When a port is removed from the bridge, the port is set back to
	 * its default state at initial boot, which is a VLAN-unaware port.
	 */
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
		   MT7530_PORT_MATRIX_MODE);

	mutex_unlock(&priv->reg_mutex);
}
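
/* An FDB entry is staged into the address table registers with
 * mt7530_fdb_write() and then committed with an MT7530_FDB_WRITE command.
 */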
static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
		    const unsigned char *addr, u16 vid,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	int ret;
	u8 port_mask = BIT(port);

	mutex_lock(&priv->reg_mutex);
	mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}
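
/* Deletion reuses the same write command, but stages the entry with an
 * empty (STATIC_EMP) status so the address is removed from the table.
 */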
static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
		    const unsigned char *addr, u16 vid,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	int ret;
	u8 port_mask = BIT(port);

	mutex_lock(&priv->reg_mutex);
	mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int
mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	struct mt7530_priv *priv = ds->priv;
	struct mt7530_fdb _fdb = { 0 };
	int cnt = MT7530_NUM_FDB_RECORDS;
	int ret = 0;
	u32 rsp = 0;

	mutex_lock(&priv->reg_mutex);

	ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
	if (ret < 0)
		goto err;
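
	/* Walk the address table: every hit is read out and reported through
	 * the callback, while MT7530_FDB_NEXT advances the search until the
	 * table signals the end or all records have been visited.
	 */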
	do {
		if (rsp & ATC_SRCH_HIT) {
			mt7530_fdb_read(priv, &_fdb);
			if (_fdb.port_mask & BIT(port)) {
				ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp,
					 data);
				if (ret < 0)
					break;
			}
		}
	} while (--cnt &&
		 !(rsp & ATC_SRCH_END) &&
		 !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
err:
	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
		    const struct switchdev_obj_port_mdb *mdb,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;
	u8 port_mask = 0;
	int ret;

	mutex_lock(&priv->reg_mutex);
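
	/* Read back the current member port map of this address, if the
	 * entry already exists, then add this port and rewrite the entry
	 * as a static one.
	 */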
	mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
	if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
		port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
			    & PORT_MAP_MASK;
	port_mask |= BIT(port);
	mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);

	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
		    const struct switchdev_obj_port_mdb *mdb,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;
	u8 port_mask = 0;
	int ret;

	mutex_lock(&priv->reg_mutex);

	mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
	if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
		port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
			    & PORT_MAP_MASK;
	port_mask &= ~BIT(port);
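
	/* Keep the entry static while member ports remain; once the last
	 * member is removed, write it back as empty (STATIC_EMP) to free
	 * the record.
	 */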
	mt7530_fdb_write(priv, vid, port_mask, addr, -1,
			 port_mask ? STATIC_ENT : STATIC_EMP);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);

	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int
mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
{
	struct mt7530_dummy_poll p;
	u32 val;
	int ret;

	val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
	mt7530_write(priv, MT7530_VTCR, val);
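
	/* The switch clears VTCR_BUSY once the VLAN table command has
	 * completed; poll every 20 us, for up to 20 ms, before giving up.
	 */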
	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
	ret = readx_poll_timeout(_mt7530_read, &p, val,
				 !(val & VTCR_BUSY), 20, 20000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		return ret;
	}

	val = mt7530_read(priv, MT7530_VTCR);
	if (val & VTCR_INVALID) {
		dev_err(priv->dev, "read VTCR invalid\n");
		return -EINVAL;
	}

	return 0;
}

static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
			   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	if (vlan_filtering) {
		/* The port is kept as a VLAN-unaware port when the bridge is
		 * set up without vlan_filtering. Otherwise, the port and the
		 * corresponding CPU port both need to be set up as VLAN-aware
		 * ports.
		 */
		mt7530_port_set_vlan_aware(ds, port);
		mt7530_port_set_vlan_aware(ds, cpu_dp->index);
	} else {
		mt7530_port_set_vlan_unaware(ds, port);
	}

	return 0;
}

static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
		   struct mt7530_hw_vlan_entry *entry)
{
	struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
	u8 new_members;
	u32 val;

	new_members = entry->old_members | BIT(entry->port);

	/* Validate the entry with independent learning, create an egress tag
	 * per VLAN and join the port as one of the port members.
	 */
	val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) |
	      VLAN_VALID;
	mt7530_write(priv, MT7530_VAWD1, val);

	/* Decide whether to add a tag to outgoing packets from the port
	 * inside the VLAN.
	 * The CPU port is always treated as a tagged port since it serves
	 * more than one VLAN, and it uses the "stack" egress mode so that
	 * VLAN tags are appended after the hardware special tag used as
	 * the DSA tag.
	 */
	if (dsa_port_is_cpu(dp))
		val = MT7530_VLAN_EGRESS_STACK;
	else if (entry->untagged)
		val = MT7530_VLAN_EGRESS_UNTAG;
	else
		val = MT7530_VLAN_EGRESS_TAG;
	mt7530_rmw(priv, MT7530_VAWD2,
		   ETAG_CTRL_P_MASK(entry->port),
		   ETAG_CTRL_P(entry->port, val));
}

static void
mt7530_hw_vlan_del(struct mt7530_priv *priv,
		   struct mt7530_hw_vlan_entry *entry)
{
	u8 new_members;
	u32 val;

	new_members = entry->old_members & ~BIT(entry->port);

	val = mt7530_read(priv, MT7530_VAWD1);
	if (!(val & VLAN_VALID)) {
		dev_err(priv->dev,
			"Cannot be deleted due to invalid entry\n");
		return;
	}
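
	/* If member ports remain, rewrite the entry with the reduced member
	 * set; otherwise clear both VAWD registers so the entry becomes
	 * invalid.
	 */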
	if (new_members) {
		val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
		      VLAN_VALID;
		mt7530_write(priv, MT7530_VAWD1, val);
	} else {
		mt7530_write(priv, MT7530_VAWD1, 0);
		mt7530_write(priv, MT7530_VAWD2, 0);
	}
}

static void
mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
		      struct mt7530_hw_vlan_entry *entry,
		      mt7530_vlan_op vlan_op)
{
	u32 val;

	/* Fetch entry */
	mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);

	val = mt7530_read(priv, MT7530_VAWD1);

	entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;

	/* Manipulate entry */
	vlan_op(priv, entry);

	/* Flush result to hardware */
	mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
}
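
/* VLAN 0 backs the VLAN-unaware bridge ports, which use PVID 0 with
 * independent VLAN learning; EG_CON keeps the original ingress tag
 * attribute on egress.
 */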
static int
mt7530_setup_vlan0(struct mt7530_priv *priv)
{
	u32 val;

	/* Validate the entry with independent learning, keep the original
	 * ingress tag attribute.
	 */
	val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) |
	      VLAN_VALID;
	mt7530_write(priv, MT7530_VAWD1, val);

	return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0);
}

static int
mt7530_port_vlan_add(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_vlan *vlan,
		     struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mt7530_hw_vlan_entry new_entry;
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
mt7530_hw_vlan_entry_init ( & new_entry , port , untagged ) ;
mt7530_hw_vlan_update ( priv , vlan - > vid , & new_entry , mt7530_hw_vlan_add ) ;
2017-12-15 12:47:00 +08:00
if ( pvid ) {
2021-01-09 02:01:46 +02:00
priv - > ports [ port ] . pvid = vlan - > vid ;
2021-08-04 00:04:02 +08:00
2021-08-06 11:47:11 +08:00
/* Accept all frames if PVID is set */
mt7530_rmw ( priv , MT7530_PVC_P ( port ) , ACC_FRM_MASK ,
MT7530_VLAN_ACC_ALL ) ;
2021-08-04 00:04:02 +08:00
/* Only configure PVID if VLAN filtering is enabled */
if ( dsa_port_is_vlan_filtering ( dsa_to_port ( ds , port ) ) )
mt7530_rmw ( priv , MT7530_PPBV1_P ( port ) ,
G0_PORT_VID_MASK ,
G0_PORT_VID ( vlan - > vid ) ) ;
2021-08-06 11:47:11 +08:00
} else if ( vlan - > vid & & priv - > ports [ port ] . pvid = = vlan - > vid ) {
/* This VLAN is overwritten without PVID, so unset it */
priv - > ports [ port ] . pvid = G0_PORT_VID_DEF ;
/* Only accept tagged frames if the port is VLAN-aware */
if ( dsa_port_is_vlan_filtering ( dsa_to_port ( ds , port ) ) )
mt7530_rmw ( priv , MT7530_PVC_P ( port ) , ACC_FRM_MASK ,
MT7530_VLAN_ACC_TAGGED ) ;
mt7530_rmw ( priv , MT7530_PPBV1_P ( port ) , G0_PORT_VID_MASK ,
G0_PORT_VID_DEF ) ;
2017-12-15 12:47:00 +08:00
}
mutex_unlock ( & priv - > reg_mutex ) ;
2021-01-09 02:01:53 +02:00
return 0 ;
2017-12-15 12:47:00 +08:00
}
static int
mt7530_port_vlan_del ( struct dsa_switch * ds , int port ,
const struct switchdev_obj_port_vlan * vlan )
{
struct mt7530_hw_vlan_entry target_entry ;
struct mt7530_priv * priv = ds - > priv ;
mutex_lock ( & priv - > reg_mutex ) ;
2021-01-09 02:01:46 +02:00
mt7530_hw_vlan_entry_init ( & target_entry , port , 0 ) ;
mt7530_hw_vlan_update ( priv , vlan - > vid , & target_entry ,
mt7530_hw_vlan_del ) ;
2017-12-15 12:47:00 +08:00
2021-01-09 02:01:46 +02:00
/* The PVID is restored to the default whenever the PVID port is
* removed from the VLAN .
*/
2021-08-04 00:04:02 +08:00
if ( priv - > ports [ port ] . pvid = = vlan - > vid ) {
priv - > ports [ port ] . pvid = G0_PORT_VID_DEF ;
2021-08-06 11:47:11 +08:00
/* Only accept tagged frames if the port is VLAN-aware */
if ( dsa_port_is_vlan_filtering ( dsa_to_port ( ds , port ) ) )
mt7530_rmw ( priv , MT7530_PVC_P ( port ) , ACC_FRM_MASK ,
MT7530_VLAN_ACC_TAGGED ) ;
2021-08-04 00:04:02 +08:00
mt7530_rmw ( priv , MT7530_PPBV1_P ( port ) , G0_PORT_VID_MASK ,
G0_PORT_VID_DEF ) ;
}
2017-12-15 12:47:00 +08:00
mutex_unlock ( & priv - > reg_mutex ) ;
return 0 ;
}
2020-09-11 21:48:54 +08:00
static int mt753x_mirror_port_get ( unsigned int id , u32 val )
{
return ( id = = ID_MT7531 ) ? MT7531_MIRROR_PORT_GET ( val ) :
MIRROR_PORT ( val ) ;
}
static int mt753x_mirror_port_set ( unsigned int id , u32 val )
{
return ( id = = ID_MT7531 ) ? MT7531_MIRROR_PORT_SET ( val ) :
MIRROR_PORT ( val ) ;
}
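A short note on why these accessors exist:

/* MT7531 places the monitor port in a different bit field of its
 * mirror register than MT7530 does, so the per-chip get/set helpers
 * above pick the right field encoding based on priv->id.
 */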
static int mt753x_port_mirror_add ( struct dsa_switch * ds , int port ,
2020-03-06 20:35:35 +08:00
struct dsa_mall_mirror_tc_entry * mirror ,
2022-03-16 22:41:43 +02:00
bool ingress , struct netlink_ext_ack * extack )
2020-03-06 20:35:35 +08:00
{
struct mt7530_priv * priv = ds - > priv ;
2020-09-11 21:48:54 +08:00
int monitor_port ;
2020-03-06 20:35:35 +08:00
u32 val ;
/* Check for an existing entry */
if ( ( ingress ? priv - > mirror_rx : priv - > mirror_tx ) & BIT ( port ) )
return - EEXIST ;
2020-09-11 21:48:54 +08:00
val = mt7530_read ( priv , MT753X_MIRROR_REG ( priv - > id ) ) ;
2020-03-06 20:35:35 +08:00
/* MT7530 only supports one monitor port */
2020-09-11 21:48:54 +08:00
monitor_port = mt753x_mirror_port_get ( priv - > id , val ) ;
if ( val & MT753X_MIRROR_EN ( priv - > id ) & &
monitor_port ! = mirror - > to_local_port )
2020-03-06 20:35:35 +08:00
return - EEXIST ;
2020-09-11 21:48:54 +08:00
val | = MT753X_MIRROR_EN ( priv - > id ) ;
val & = ~ MT753X_MIRROR_MASK ( priv - > id ) ;
val | = mt753x_mirror_port_set ( priv - > id , mirror - > to_local_port ) ;
mt7530_write ( priv , MT753X_MIRROR_REG ( priv - > id ) , val ) ;
2020-03-06 20:35:35 +08:00
val = mt7530_read ( priv , MT7530_PCR_P ( port ) ) ;
if ( ingress ) {
val | = PORT_RX_MIR ;
priv - > mirror_rx | = BIT ( port ) ;
} else {
val | = PORT_TX_MIR ;
priv - > mirror_tx | = BIT ( port ) ;
}
mt7530_write ( priv , MT7530_PCR_P ( port ) , val ) ;
return 0 ;
}
2020-09-11 21:48:54 +08:00
static void mt753x_port_mirror_del ( struct dsa_switch * ds , int port ,
2020-03-06 20:35:35 +08:00
struct dsa_mall_mirror_tc_entry * mirror )
{
struct mt7530_priv * priv = ds - > priv ;
u32 val ;
val = mt7530_read ( priv , MT7530_PCR_P ( port ) ) ;
if ( mirror - > ingress ) {
val & = ~ PORT_RX_MIR ;
priv - > mirror_rx & = ~ BIT ( port ) ;
} else {
val & = ~ PORT_TX_MIR ;
priv - > mirror_tx & = ~ BIT ( port ) ;
}
mt7530_write ( priv , MT7530_PCR_P ( port ) , val ) ;
if ( ! priv - > mirror_rx & & ! priv - > mirror_tx ) {
2020-09-11 21:48:54 +08:00
val = mt7530_read ( priv , MT753X_MIRROR_REG ( priv - > id ) ) ;
val & = ~ MT753X_MIRROR_EN ( priv - > id ) ;
mt7530_write ( priv , MT753X_MIRROR_REG ( priv - > id ) , val ) ;
2020-03-06 20:35:35 +08:00
}
}
2017-04-07 16:45:09 +08:00
static enum dsa_tag_protocol
2020-01-07 21:06:05 -08:00
mtk_get_tag_protocol ( struct dsa_switch * ds , int port ,
enum dsa_tag_protocol mp )
2017-04-07 16:45:09 +08:00
{
2021-07-31 01:57:14 +03:00
return DSA_TAG_PROTO_MTK ;
2017-04-07 16:45:09 +08:00
}
2021-02-26 14:32:26 +08:00
# ifdef CONFIG_GPIOLIB
2021-01-25 12:43:22 +08:00
static inline u32
mt7530_gpio_to_bit ( unsigned int offset )
{
/* Map GPIO offset to register bit
* [ 2 : 0 ] port 0 LED 0. .2 as GPIO 0. .2
* [ 6 : 4 ] port 1 LED 0. .2 as GPIO 3. .5
* [ 10 : 8 ] port 2 LED 0. .2 as GPIO 6. .8
* [ 14 : 12 ] port 3 LED 0. .2 as GPIO 9. .11
* [ 18 : 16 ] port 4 LED 0. .2 as GPIO 12. .14
*/
return BIT ( offset + offset / 3 ) ;
}
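A quick worked example of the offset-to-bit mapping above: each port
owns three LED bits followed by one skipped bit, which is exactly what
offset + offset / 3 produces.

/* offset 0  -> BIT(0)   port 0, LED 0
 * offset 2  -> BIT(2)   port 0, LED 2
 * offset 3  -> BIT(4)   port 1, LED 0  (bit 3 skipped)
 * offset 6  -> BIT(8)   port 2, LED 0  (bit 7 skipped)
 * offset 14 -> BIT(18)  port 4, LED 2
 */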
static int
mt7530_gpio_get ( struct gpio_chip * gc , unsigned int offset )
{
struct mt7530_priv * priv = gpiochip_get_data ( gc ) ;
u32 bit = mt7530_gpio_to_bit ( offset ) ;
return ! ! ( mt7530_read ( priv , MT7530_LED_GPIO_DATA ) & bit ) ;
}
static void
mt7530_gpio_set ( struct gpio_chip * gc , unsigned int offset , int value )
{
struct mt7530_priv * priv = gpiochip_get_data ( gc ) ;
u32 bit = mt7530_gpio_to_bit ( offset ) ;
if ( value )
mt7530_set ( priv , MT7530_LED_GPIO_DATA , bit ) ;
else
mt7530_clear ( priv , MT7530_LED_GPIO_DATA , bit ) ;
}
static int
mt7530_gpio_get_direction ( struct gpio_chip * gc , unsigned int offset )
{
struct mt7530_priv * priv = gpiochip_get_data ( gc ) ;
u32 bit = mt7530_gpio_to_bit ( offset ) ;
return ( mt7530_read ( priv , MT7530_LED_GPIO_DIR ) & bit ) ?
GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN ;
}
static int
mt7530_gpio_direction_input ( struct gpio_chip * gc , unsigned int offset )
{
struct mt7530_priv * priv = gpiochip_get_data ( gc ) ;
u32 bit = mt7530_gpio_to_bit ( offset ) ;
mt7530_clear ( priv , MT7530_LED_GPIO_OE , bit ) ;
mt7530_clear ( priv , MT7530_LED_GPIO_DIR , bit ) ;
return 0 ;
}
static int
mt7530_gpio_direction_output ( struct gpio_chip * gc , unsigned int offset , int value )
{
struct mt7530_priv * priv = gpiochip_get_data ( gc ) ;
u32 bit = mt7530_gpio_to_bit ( offset ) ;
mt7530_set ( priv , MT7530_LED_GPIO_DIR , bit ) ;
if ( value )
mt7530_set ( priv , MT7530_LED_GPIO_DATA , bit ) ;
else
mt7530_clear ( priv , MT7530_LED_GPIO_DATA , bit ) ;
mt7530_set ( priv , MT7530_LED_GPIO_OE , bit ) ;
return 0 ;
}
static int
mt7530_setup_gpio ( struct mt7530_priv * priv )
{
struct device * dev = priv - > dev ;
struct gpio_chip * gc ;
gc = devm_kzalloc ( dev , sizeof ( * gc ) , GFP_KERNEL ) ;
if ( ! gc )
return - ENOMEM ;
mt7530_write ( priv , MT7530_LED_GPIO_OE , 0 ) ;
mt7530_write ( priv , MT7530_LED_GPIO_DIR , 0 ) ;
mt7530_write ( priv , MT7530_LED_IO_MODE , 0 ) ;
gc - > label = " mt7530 " ;
gc - > parent = dev ;
gc - > owner = THIS_MODULE ;
gc - > get_direction = mt7530_gpio_get_direction ;
gc - > direction_input = mt7530_gpio_direction_input ;
gc - > direction_output = mt7530_gpio_direction_output ;
gc - > get = mt7530_gpio_get ;
gc - > set = mt7530_gpio_set ;
gc - > base = - 1 ;
gc - > ngpio = 15 ;
gc - > can_sleep = true ;
return devm_gpiochip_add_data ( dev , gc , priv ) ;
}
2021-02-26 14:32:26 +08:00
# endif /* CONFIG_GPIOLIB */
2021-01-25 12:43:22 +08:00
2021-05-19 11:32:00 +08:00
static irqreturn_t
mt7530_irq_thread_fn ( int irq , void * dev_id )
{
struct mt7530_priv * priv = dev_id ;
bool handled = false ;
u32 val ;
int p ;
2023-04-03 02:18:16 +01:00
mt7530_mutex_lock ( priv ) ;
2021-05-19 11:32:00 +08:00
val = mt7530_mii_read ( priv , MT7530_SYS_INT_STS ) ;
mt7530_mii_write ( priv , MT7530_SYS_INT_STS , val ) ;
2023-04-03 02:18:16 +01:00
mt7530_mutex_unlock ( priv ) ;
2021-05-19 11:32:00 +08:00
for ( p = 0 ; p < MT7530_NUM_PHYS ; p + + ) {
if ( BIT ( p ) & val ) {
unsigned int irq ;
irq = irq_find_mapping ( priv - > irq_domain , p ) ;
handle_nested_irq ( irq ) ;
handled = true ;
}
}
return IRQ_RETVAL ( handled ) ;
}
static void
mt7530_irq_mask ( struct irq_data * d )
{
struct mt7530_priv * priv = irq_data_get_irq_chip_data ( d ) ;
priv - > irq_enable & = ~ BIT ( d - > hwirq ) ;
}
static void
mt7530_irq_unmask ( struct irq_data * d )
{
struct mt7530_priv * priv = irq_data_get_irq_chip_data ( d ) ;
priv - > irq_enable | = BIT ( d - > hwirq ) ;
}
static void
mt7530_irq_bus_lock ( struct irq_data * d )
{
struct mt7530_priv * priv = irq_data_get_irq_chip_data ( d ) ;
2023-04-03 02:18:16 +01:00
mt7530_mutex_lock ( priv ) ;
2021-05-19 11:32:00 +08:00
}
static void
mt7530_irq_bus_sync_unlock ( struct irq_data * d )
{
struct mt7530_priv * priv = irq_data_get_irq_chip_data ( d ) ;
mt7530_mii_write ( priv , MT7530_SYS_INT_EN , priv - > irq_enable ) ;
2023-04-03 02:18:16 +01:00
mt7530_mutex_unlock ( priv ) ;
2021-05-19 11:32:00 +08:00
}
static struct irq_chip mt7530_irq_chip = {
. name = KBUILD_MODNAME ,
. irq_mask = mt7530_irq_mask ,
. irq_unmask = mt7530_irq_unmask ,
. irq_bus_lock = mt7530_irq_bus_lock ,
. irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock ,
} ;
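Note the split above: mask/unmask only update the cached
priv->irq_enable word.

/* The actual MT7530_SYS_INT_EN write is deferred to
 * .irq_bus_sync_unlock, because register access goes through a slow,
 * sleeping bus (typically MDIO) that must not be touched from the
 * atomic irq_chip callbacks.
 */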
static int
mt7530_irq_map ( struct irq_domain * domain , unsigned int irq ,
irq_hw_number_t hwirq )
{
irq_set_chip_data ( irq , domain - > host_data ) ;
irq_set_chip_and_handler ( irq , & mt7530_irq_chip , handle_simple_irq ) ;
irq_set_nested_thread ( irq , true ) ;
irq_set_noprobe ( irq ) ;
return 0 ;
}
static const struct irq_domain_ops mt7530_irq_domain_ops = {
. map = mt7530_irq_map ,
. xlate = irq_domain_xlate_onecell ,
} ;
2023-04-03 02:19:40 +01:00
static void
mt7988_irq_mask ( struct irq_data * d )
{
struct mt7530_priv * priv = irq_data_get_irq_chip_data ( d ) ;
priv - > irq_enable & = ~ BIT ( d - > hwirq ) ;
mt7530_mii_write ( priv , MT7530_SYS_INT_EN , priv - > irq_enable ) ;
}
static void
mt7988_irq_unmask ( struct irq_data * d )
{
struct mt7530_priv * priv = irq_data_get_irq_chip_data ( d ) ;
priv - > irq_enable | = BIT ( d - > hwirq ) ;
mt7530_mii_write ( priv , MT7530_SYS_INT_EN , priv - > irq_enable ) ;
}
static struct irq_chip mt7988_irq_chip = {
. name = KBUILD_MODNAME ,
. irq_mask = mt7988_irq_mask ,
. irq_unmask = mt7988_irq_unmask ,
} ;
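Contrast with the MT7530 irq_chip above:

/* The MT7988 variant writes MT7530_SYS_INT_EN directly from
 * mask/unmask and needs no bus_lock pair, presumably because its
 * switch registers are memory-mapped rather than sitting behind a
 * slow MDIO bus.
 */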
static int
mt7988_irq_map ( struct irq_domain * domain , unsigned int irq ,
irq_hw_number_t hwirq )
{
irq_set_chip_data ( irq , domain - > host_data ) ;
irq_set_chip_and_handler ( irq , & mt7988_irq_chip , handle_simple_irq ) ;
irq_set_nested_thread ( irq , true ) ;
irq_set_noprobe ( irq ) ;
return 0 ;
}
static const struct irq_domain_ops mt7988_irq_domain_ops = {
. map = mt7988_irq_map ,
. xlate = irq_domain_xlate_onecell ,
} ;
2021-05-19 11:32:00 +08:00
static void
mt7530_setup_mdio_irq ( struct mt7530_priv * priv )
{
struct dsa_switch * ds = priv - > ds ;
int p ;
for ( p = 0 ; p < MT7530_NUM_PHYS ; p + + ) {
if ( BIT ( p ) & ds - > phys_mii_mask ) {
unsigned int irq ;
irq = irq_create_mapping ( priv - > irq_domain , p ) ;
ds - > slave_mii_bus - > irq [ p ] = irq ;
}
}
}
static int
mt7530_setup_irq ( struct mt7530_priv * priv )
{
struct device * dev = priv - > dev ;
struct device_node * np = dev - > of_node ;
int ret ;
if ( ! of_property_read_bool ( np , " interrupt-controller " ) ) {
dev_info ( dev , " no interrupt support \n " ) ;
return 0 ;
}
priv - > irq = of_irq_get ( np , 0 ) ;
if ( priv - > irq < = 0 ) {
dev_err ( dev , " failed to get parent IRQ: %d \n " , priv - > irq ) ;
return priv - > irq ? : - EINVAL ;
}
2023-04-03 02:19:40 +01:00
if ( priv - > id = = ID_MT7988 )
priv - > irq_domain = irq_domain_add_linear ( np , MT7530_NUM_PHYS ,
& mt7988_irq_domain_ops ,
priv ) ;
else
priv - > irq_domain = irq_domain_add_linear ( np , MT7530_NUM_PHYS ,
& mt7530_irq_domain_ops ,
priv ) ;
2021-05-19 11:32:00 +08:00
if ( ! priv - > irq_domain ) {
dev_err ( dev , " failed to create IRQ domain \n " ) ;
return - ENOMEM ;
}
/* This register must be set for MT7530 to properly fire interrupts */
if ( priv - > id ! = ID_MT7531 )
mt7530_set ( priv , MT7530_TOP_SIG_CTRL , TOP_SIG_CTRL_NORMAL ) ;
ret = request_threaded_irq ( priv - > irq , NULL , mt7530_irq_thread_fn ,
IRQF_ONESHOT , KBUILD_MODNAME , priv ) ;
if ( ret ) {
irq_domain_remove ( priv - > irq_domain ) ;
dev_err ( dev , " failed to request IRQ: %d \n " , ret ) ;
return ret ;
}
return 0 ;
}
static void
mt7530_free_mdio_irq ( struct mt7530_priv * priv )
{
int p ;
for ( p = 0 ; p < MT7530_NUM_PHYS ; p + + ) {
if ( BIT ( p ) & priv - > ds - > phys_mii_mask ) {
unsigned int irq ;
irq = irq_find_mapping ( priv - > irq_domain , p ) ;
irq_dispose_mapping ( irq ) ;
}
}
}
static void
mt7530_free_irq_common ( struct mt7530_priv * priv )
{
free_irq ( priv - > irq , priv ) ;
irq_domain_remove ( priv - > irq_domain ) ;
}
static void
mt7530_free_irq ( struct mt7530_priv * priv )
{
mt7530_free_mdio_irq ( priv ) ;
mt7530_free_irq_common ( priv ) ;
}
static int
mt7530_setup_mdio ( struct mt7530_priv * priv )
{
struct dsa_switch * ds = priv - > ds ;
struct device * dev = priv - > dev ;
struct mii_bus * bus ;
static int idx ;
int ret ;
bus = devm_mdiobus_alloc ( dev ) ;
if ( ! bus )
return - ENOMEM ;
ds - > slave_mii_bus = bus ;
bus - > priv = priv ;
bus - > name = KBUILD_MODNAME " -mii " ;
snprintf ( bus - > id , MII_BUS_ID_SIZE , KBUILD_MODNAME " -%d " , idx + + ) ;
2023-01-17 00:52:16 +01:00
bus - > read = mt753x_phy_read_c22 ;
bus - > write = mt753x_phy_write_c22 ;
bus - > read_c45 = mt753x_phy_read_c45 ;
bus - > write_c45 = mt753x_phy_write_c45 ;
2021-05-19 11:32:00 +08:00
bus - > parent = dev ;
bus - > phy_mask = ~ ds - > phys_mii_mask ;
if ( priv - > irq )
mt7530_setup_mdio_irq ( priv ) ;
2022-02-07 18:15:52 +02:00
ret = devm_mdiobus_register ( dev , bus ) ;
2021-05-19 11:32:00 +08:00
if ( ret ) {
dev_err ( dev , " failed to register MDIO bus: %d \n " , ret ) ;
if ( priv - > irq )
mt7530_free_mdio_irq ( priv ) ;
}
return ret ;
}
2017-04-07 16:45:09 +08:00
static int
mt7530_setup ( struct dsa_switch * ds )
{
struct mt7530_priv * priv = ds - > priv ;
2022-06-10 19:05:38 +02:00
struct device_node * dn = NULL ;
2019-09-02 15:02:26 +02:00
struct device_node * phy_node ;
struct device_node * mac_np ;
2017-04-07 16:45:09 +08:00
struct mt7530_dummy_poll p ;
2019-09-02 15:02:26 +02:00
phy_interface_t interface ;
2022-06-10 19:05:38 +02:00
struct dsa_port * cpu_dp ;
2019-09-02 15:02:24 +02:00
u32 id , val ;
int ret , i ;
2017-04-07 16:45:09 +08:00
2017-09-20 12:28:05 -04:00
/* The parent node of the master netdev , which holds the common system
2017-04-07 16:45:09 +08:00
* controller , is also the container for the two GMAC nodes that
* appear as two netdev instances .
*/
2022-06-10 19:05:38 +02:00
dsa_switch_for_each_cpu_port ( cpu_dp , ds ) {
dn = cpu_dp - > master - > dev . of_node - > parent ;
/* It doesn't matter which CPU port is found first,
* their masters should share the same parent OF node
*/
break ;
}
if ( ! dn ) {
dev_err ( ds - > dev , " parent OF node of DSA master not found " ) ;
return - EINVAL ;
}
net: dsa: mt7530: enable assisted learning on CPU port
Consider the following bridge configuration, where bond0 is not
offloaded:
+-- br0 --+
/ / | \
/ / | \
/ | | bond0
/ | | / \
swp0 swp1 swp2 swp3 swp4
. . .
. . .
A B C
Address learning is enabled on offloaded ports (swp0~2) and the CPU
port, so when client A sends a packet to C, the following will happen:
1. The switch learns that client A can be reached at swp0.
2. The switch probably already knows that client C can be reached at the
CPU port, so it forwards the packet to the CPU.
3. The bridge core knows client C can be reached at bond0, so it
forwards the packet back to the switch.
4. The switch learns that client A can be reached at the CPU port.
5. The switch forwards the packet to either swp3 or swp4, according to
the packet's tag.
That makes client A's MAC address flap between swp0 and the CPU port. If
client B sends a packet to A, it is possible that the packet is
forwarded to the CPU. With offload_fwd_mark = 1, the bridge core won't
forward it back to the switch, resulting in packet loss.
As we have the assisted_learning_on_cpu_port in DSA core now, enable
that and disable hardware learning on the CPU port.
Signed-off-by: DENG Qingfang <dqfext@gmail.com>
Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-04 00:04:01 +08:00
ds - > assisted_learning_on_cpu_port = true ;
2020-12-11 01:03:22 +08:00
ds - > mtu_enforcement_ingress = true ;
2017-04-07 16:45:09 +08:00
2019-01-30 11:24:05 +10:00
if ( priv - > id = = ID_MT7530 ) {
regulator_set_voltage ( priv - > core_pwr , 1000000 , 1000000 ) ;
ret = regulator_enable ( priv - > core_pwr ) ;
if ( ret < 0 ) {
dev_err ( priv - > dev ,
" Failed to enable core power: %d \n " , ret ) ;
return ret ;
}
2017-04-07 16:45:09 +08:00
2019-01-30 11:24:05 +10:00
regulator_set_voltage ( priv - > io_pwr , 3300000 , 3300000 ) ;
ret = regulator_enable ( priv - > io_pwr ) ;
if ( ret < 0 ) {
dev_err ( priv - > dev , " Failed to enable io pwr: %d \n " ,
ret ) ;
return ret ;
}
2017-04-07 16:45:09 +08:00
}
/* Reset the whole chip through the GPIO pin or the memory-mapped
* registers , depending on the type of hardware .
*/
if ( priv - > mcm ) {
reset_control_assert ( priv - > rstc ) ;
usleep_range ( 1000 , 1100 ) ;
reset_control_deassert ( priv - > rstc ) ;
} else {
gpiod_set_value_cansleep ( priv - > reset , 0 ) ;
usleep_range ( 1000 , 1100 ) ;
gpiod_set_value_cansleep ( priv - > reset , 1 ) ;
}
/* Wait for MT7530 to become stable */
INIT_MT7530_DUMMY_POLL ( & p , priv , MT7530_HWTRAP ) ;
ret = readx_poll_timeout ( _mt7530_read , & p , val , val ! = 0 ,
20 , 1000000 ) ;
if ( ret < 0 ) {
dev_err ( priv - > dev , " reset timeout \n " ) ;
return ret ;
}
id = mt7530_read ( priv , MT7530_CREV ) ;
id > > = CHIP_NAME_SHIFT ;
if ( id ! = MT7530_ID ) {
dev_err ( priv - > dev , " chip %x can't be supported \n " , id ) ;
return - ENODEV ;
}
/* Reset the switch through internal reset */
mt7530_write ( priv , MT7530_SYS_CTRL ,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST ) ;
net: dsa: mt7530: permit port 5 to work without port 6 on MT7621 SoC
The MT7530 switch from the MT7621 SoC has 2 ports which can be set up as
internal: port 5 and 6. Arınç reports that the GMAC1 attached to port 5
receives corrupted frames, unless port 6 (attached to GMAC0) has been
brought up by the driver. This is true regardless of whether port 5 is
used as a user port or as a CPU port (carrying DSA tags).
Offline debugging (blind for me) which began in the linked thread showed
experimentally that the configuration done by the driver for port 6
contains a step which is needed by port 5 as well - the write to
CORE_GSWPLL_GRP2 (note that I've no idea as to what it does, apart from
the comment "Set core clock into 500Mhz"). Prints put by Arınç show that
the reset value of CORE_GSWPLL_GRP2 is RG_GSWPLL_POSDIV_500M(1) |
RG_GSWPLL_FBKDIV_500M(40) (0x128), both on the MCM MT7530 from the
MT7621 SoC, as well as on the standalone MT7530 from MT7623NI Bananapi
BPI-R2. Apparently, port 5 on the standalone MT7530 can work under both
values of the register, while on the MT7621 SoC it cannot.
The call path that triggers the register write is:
mt753x_phylink_mac_config() for port 6
-> mt753x_pad_setup()
-> mt7530_pad_clk_setup()
so this fully explains the behavior noticed by Arınç, that bringing port
6 up is necessary.
The simplest fix for the problem is to extract the register writes which
are needed for both port 5 and 6 into a common mt7530_pll_setup()
function, which is called at mt7530_setup() time, immediately after
switch reset. We can argue that this mirrors the code layout introduced
in mt7531_setup() by commit 42bc4fafe359 ("net: mt7531: only do PLL once
after the reset"), in that the PLL setup has the exact same positioning,
and further work to consolidate the separate setup() functions is not
hindered.
Testing confirms that:
- the slight reordering of writes to MT7530_P6ECR and to
CORE_GSWPLL_GRP1 / CORE_GSWPLL_GRP2 introduced by this change does not
appear to cause problems for the operation of port 6 on MT7621 and on
MT7623 (where port 5 also always worked)
- packets sent through port 5 are not corrupted anymore, regardless of
whether port 6 is enabled by phylink or not (or even present in the
device tree)
My algorithm for determining the Fixes: tag is as follows. Testing shows
that some logic from mt7530_pad_clk_setup() is needed even for port 5.
Prior to commit ca366d6c889b ("net: dsa: mt7530: Convert to PHYLINK
API"), a call did exist for all phy_is_pseudo_fixed_link() ports - so
port 5 included. That commit replaced it with a temporary "Port 5 is not
supported!" comment, and the following commit 38f790a80560 ("net: dsa:
mt7530: Add support for port 5") replaced that comment with a
configuration procedure in mt7530_setup_port5() which was insufficient
for port 5 to work. I'm laying the blame on the patch that claimed
support for port 5, although one would have also needed the change from
commit c3b8e07909db ("net: dsa: mt7530: setup core clock even in TRGMII
mode") for the write to be performed completely independently from port
6's configuration.
Thanks go to Arınç for describing the problem, for debugging and for
testing.
Reported-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Link: https://lore.kernel.org/netdev/f297c2c4-6e7c-57ac-2394-f6025d309b9d@arinc9.com/
Fixes: 38f790a80560 ("net: dsa: mt7530: Add support for port 5")
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Link: https://lore.kernel.org/r/20230307155411.868573-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-03-07 17:54:11 +02:00
mt7530_pll_setup ( priv ) ;
2023-03-20 22:05:19 +03:00
/* Lower Tx driving for TRGMII path */
for ( i = 0 ; i < NUM_TRGMII_CTRL ; i + + )
mt7530_write ( priv , MT7530_TRGMII_TD_ODT ( i ) ,
TD_DM_DRVP ( 8 ) | TD_DM_DRVN ( 8 ) ) ;
for ( i = 0 ; i < NUM_TRGMII_CTRL ; i + + )
mt7530_rmw ( priv , MT7530_TRGMII_RD ( i ) ,
RD_TAP_MASK , RD_TAP ( 16 ) ) ;
2023-03-10 10:33:37 +03:00
/* Enable port 6 */
2017-04-07 16:45:09 +08:00
val = mt7530_read ( priv , MT7530_MHWTRAP ) ;
val & = ~ MHWTRAP_P6_DIS & ~ MHWTRAP_PHY_ACCESS ;
val | = MHWTRAP_MANUAL ;
mt7530_write ( priv , MT7530_MHWTRAP , val ) ;
2019-09-02 15:02:24 +02:00
priv - > p6_interface = PHY_INTERFACE_MODE_NA ;
2023-06-17 09:26:46 +03:00
mt753x_trap_frames ( priv ) ;
2017-04-07 16:45:09 +08:00
/* Enable and reset MIB counters */
mt7530_mib_reset ( ds ) ;
for ( i = 0 ; i < MT7530_NUM_PORTS ; i + + ) {
/* Disable forwarding by default on all ports */
mt7530_rmw ( priv , MT7530_PCR_P ( i ) , PCR_MATRIX_MASK ,
PCR_MATRIX_CLR ) ;
2021-08-04 00:04:01 +08:00
/* Disable learning by default on all ports */
mt7530_set ( priv , MT7530_PSC_P ( i ) , SA_DIS ) ;
2020-09-19 20:28:10 +01:00
if ( dsa_is_cpu_port ( ds , i ) ) {
ret = mt753x_cpu_port_enable ( ds , i ) ;
if ( ret )
return ret ;
2021-03-16 01:09:40 +08:00
} else {
2019-02-24 20:44:43 +01:00
mt7530_port_disable ( ds , i ) ;
net: dsa: mt7530: use independent VLAN learning on VLAN-unaware bridges
Consider the following bridge configuration, where bond0 is not
offloaded:
+-- br0 --+
/ / | \
/ / | \
/ | | bond0
/ | | / \
swp0 swp1 swp2 swp3 swp4
. . .
. . .
A B C
Ideally, when the switch receives a packet from swp3 or swp4, it should
forward the packet to the CPU, according to the port matrix and unknown
unicast flood settings.
But packet loss will happen if the destination address is at one of the
offloaded ports (swp0~2). For example, when client C sends a packet to
A, the FDB lookup will indicate that it should be forwarded to swp0, but
the port matrix of swp3 and swp4 is configured to only allow the CPU to
be its destination, so it is dropped.
However, this issue does not happen if the bridge is VLAN-aware. That is
because VLAN-aware bridges use independent VLAN learning, i.e. use VID
for FDB lookup, on offloaded ports. As swp3 and swp4 are not offloaded,
shared VLAN learning with default filter ID of 0 is used instead. So the
lookup for A with filter ID 0 never hits and the packet can be forwarded
to the CPU.
In the current code, only two combinations were used to toggle user
ports' VLAN awareness: one is PCR.PORT_VLAN set to port matrix mode with
PVC.VLAN_ATTR set to transparent port, the other is PCR.PORT_VLAN set to
security mode with PVC.VLAN_ATTR set to user port.
It turns out that only PVC.VLAN_ATTR contributes to VLAN awareness, and
port matrix mode just skips the VLAN table lookup. The reference manual
is somewhat misleading when describing PORT_VLAN modes. It states that
PORT_MEM (VLAN port member) is used for destination if the VLAN table
lookup hits, but actually **PORT_MEM & PORT_MATRIX** (bitwise AND of
VLAN port member and port matrix) is used instead, which means we can
have two or more separate VLAN-aware bridges with the same PVID and
traffic won't leak between them.
Therefore, to solve this, enable independent VLAN learning with PVID 0
on VLAN-unaware bridges, by setting their PCR.PORT_VLAN to fallback
mode, while leaving standalone ports in port matrix mode. The CPU port
is always set to fallback mode to serve those bridges.
During testing, it is found that FDB lookup with filter ID of 0 will
also hit entries with VID 0 even with independent VLAN learning. To
avoid that, install all VLANs with filter ID of 1.
Signed-off-by: DENG Qingfang <dqfext@gmail.com>
Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
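A minimal sketch of the mode selection described above (hedged: the helper is hypothetical, while MT7530_PORT_FALLBACK_MODE, MT7530_PORT_MATRIX_MODE and PCR_PORT_VLAN_MASK follow this driver's register definitions):

/* Sketch: bridged ports (and the CPU port) use fallback mode, so the
 * VLAN table is consulted but lookup misses still follow the port
 * matrix; standalone ports stay in pure port matrix mode.
 */
static void mt7530_port_vlan_mode_sketch(struct mt7530_priv *priv,
					 int port, bool bridged)
{
	u32 mode = bridged ? MT7530_PORT_FALLBACK_MODE
			   : MT7530_PORT_MATRIX_MODE;

	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, mode);
}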
2021-08-04 00:04:02 +08:00
/* Set default PVID to 0 on all user ports */
mt7530_rmw ( priv , MT7530_PPBV1_P ( i ) , G0_PORT_VID_MASK ,
G0_PORT_VID_DEF ) ;
2021-03-16 01:09:40 +08:00
}
2020-04-14 14:34:08 +08:00
/* Enable consistent egress tag */
mt7530_rmw ( priv , MT7530_PVC_P ( i ) , PVC_EG_TAG_MASK ,
PVC_EG_TAG ( MT7530_VLAN_EG_CONSISTENT ) ) ;
2017-04-07 16:45:09 +08:00
}
2021-08-25 00:52:52 +08:00
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0 ( priv ) ;
if ( ret )
return ret ;
2019-09-02 15:02:26 +02:00
/* Setup port 5 */
priv - > p5_intf_sel = P5_DISABLED ;
interface = PHY_INTERFACE_MODE_NA ;
if ( ! dsa_is_unused_port ( ds , 5 ) ) {
priv - > p5_intf_sel = P5_INTF_SEL_GMAC5 ;
net: of_get_phy_mode: Change API to solve int/unit warnings
Before this change of_get_phy_mode() returned an enum,
phy_interface_t. On error, -ENODEV etc. is returned. If the result of
the function is stored in a variable of type phy_interface_t, and the
compiler has decided to represent this as an unsigned int, comparison
with -ENODEV etc. is a signed vs unsigned comparison.
Fix this problem by changing the API. Make the function return an
error, or 0 on success, and pass a pointer, of type phy_interface_t,
where the phy mode should be stored.
v2:
Return with *interface set to PHY_INTERFACE_MODE_NA on error.
Add error checks to all users of of_get_phy_mode()
Fixup a few reverse christmas tree errors
Fixup a few slightly malformed reverse christmas trees
v3:
Fix 0-day reported errors.
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
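The call sites in this driver follow the pattern used immediately below; in general (a sketch, with a hypothetical node pointer np):

	phy_interface_t interface;
	int err;

	err = of_get_phy_mode(np, &interface);
	if (err && err != -ENODEV)
		return err;
	/* On error, interface is left as PHY_INTERFACE_MODE_NA */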
2019-11-04 02:40:33 +01:00
ret = of_get_phy_mode ( dsa_to_port ( ds , 5 ) - > dn , & interface ) ;
if ( ret & & ret ! = - ENODEV )
return ret ;
2019-09-02 15:02:26 +02:00
} else {
/* Scan the ethernet nodes: look for GMAC1 and look up the PHY it uses */
for_each_child_of_node ( dn , mac_np ) {
if ( ! of_device_is_compatible ( mac_np ,
" mediatek,eth-mac " ) )
continue ;
ret = of_property_read_u32 ( mac_np , " reg " , & id ) ;
if ( ret < 0 | | id ! = 1 )
continue ;
phy_node = of_parse_phandle ( mac_np , " phy-handle " , 0 ) ;
2020-04-03 19:28:24 +08:00
if ( ! phy_node )
continue ;
2019-09-02 15:02:26 +02:00
if ( phy_node - > parent = = priv - > dev - > of_node - > parent ) {
net: of_get_phy_mode: Change API to solve int/unit warnings
2019-11-04 02:40:33 +01:00
ret = of_get_phy_mode ( mac_np , & interface ) ;
2020-08-25 01:33:11 +05:30
if ( ret & & ret ! = - ENODEV ) {
of_node_put ( mac_np ) ;
2022-04-28 17:53:17 +08:00
of_node_put ( phy_node ) ;
net: of_get_phy_mode: Change API to solve int/unit warnings
2019-11-04 02:40:33 +01:00
return ret ;
2020-08-25 01:33:11 +05:30
}
2019-09-02 15:02:26 +02:00
id = of_mdio_parse_addr ( ds - > dev , phy_node ) ;
if ( id = = 0 )
priv - > p5_intf_sel = P5_INTF_SEL_PHY_P0 ;
if ( id = = 4 )
priv - > p5_intf_sel = P5_INTF_SEL_PHY_P4 ;
}
2020-08-25 01:33:11 +05:30
of_node_put ( mac_np ) ;
2019-09-02 15:02:26 +02:00
of_node_put ( phy_node ) ;
break ;
}
}
2021-02-26 14:32:26 +08:00
# ifdef CONFIG_GPIOLIB
2021-01-25 12:43:22 +08:00
if ( of_property_read_bool ( priv - > dev - > of_node , " gpio-controller " ) ) {
ret = mt7530_setup_gpio ( priv ) ;
if ( ret )
return ret ;
}
2021-02-26 14:32:26 +08:00
# endif /* CONFIG_GPIOLIB */
2021-01-25 12:43:22 +08:00
2019-09-02 15:02:26 +02:00
mt7530_setup_port5 ( ds , interface ) ;
2017-04-07 16:45:09 +08:00
/* Flush the FDB table */
2018-04-02 16:24:14 -07:00
ret = mt7530_fdb_cmd ( priv , MT7530_FDB_FLUSH , NULL ) ;
2017-04-07 16:45:09 +08:00
if ( ret < 0 )
return ret ;
return 0 ;
}
2023-04-03 02:19:02 +01:00
static int
mt7531_setup_common ( struct dsa_switch * ds )
{
struct mt7530_priv * priv = ds - > priv ;
int ret , i ;
2023-06-17 09:26:46 +03:00
mt753x_trap_frames ( priv ) ;
2023-04-03 02:19:02 +01:00
/* Enable and reset MIB counters */
mt7530_mib_reset ( ds ) ;
net: dsa: mt7530: fix network connectivity with multiple CPU ports
On mt753x_cpu_port_enable() there's code that enables flooding for the CPU
port only. Since mt753x_cpu_port_enable() runs twice when both CPU ports
are enabled, port 6 becomes the only port to forward the frames to. But
port 5 is the active port, so no frames received from the user ports will
be forwarded to port 5 which breaks network connectivity.
Every bit of the BC_FFP, UNM_FFP, and UNU_FFP bits represents a port. Fix
this issue by setting the bit that corresponds to the CPU port without
overwriting the other bits.
Clear the bits beforehand only for the MT7531 switch. According to the
documents MT7621 Giga Switch Programming Guide v0.3 and MT7531 Reference
Manual for Development Board v1.0, after reset, the BC_FFP, UNM_FFP, and
UNU_FFP bits are set to 1 for MT7531, 0 for MT7530.
The commit 5e5502e012b8 ("net: dsa: mt7530: fix roaming from DSA user
ports") silently changed the method to set the bits on the MT7530_MFC.
Instead of clearing the relevant bits before mt7530_cpu_port_enable()
which runs under a for loop, the commit started doing it on
mt7530_cpu_port_enable().
Back then, this didn't really matter as only a single CPU port could be
used since the CPU port number was hardcoded. The driver was later changed
with commit 1f9a6abecf53 ("net: dsa: mt7530: get cpu-port via dp->cpu_dp
instead of constant") to retrieve the CPU port via dp->cpu_dp. With that,
this silent change became an issue for when using multiple CPU ports.
Fixes: 5e5502e012b8 ("net: dsa: mt7530: fix roaming from DSA user ports")
Signed-off-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
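The matching per-port change in mt753x_cpu_port_enable() looks roughly like this (a sketch: the helper is hypothetical, while BC_FFP()/UNM_FFP()/UNU_FFP() are the one-bit-per-port MT7530_MFC field macros this driver defines):

/* Sketch: add this CPU port to the broadcast, unknown-multicast and
 * unknown-unicast flood masks without clobbering the other ports' bits.
 */
static void mt753x_cpu_flood_sketch(struct mt7530_priv *priv, int port)
{
	mt7530_set(priv, MT7530_MFC,
		   BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
		   UNU_FFP(BIT(port)));
}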
2023-05-03 00:09:47 +03:00
/* Disable flooding on all ports */
mt7530_clear ( priv , MT7530_MFC , BC_FFP_MASK | UNM_FFP_MASK |
UNU_FFP_MASK ) ;
2023-04-03 02:19:02 +01:00
for ( i = 0 ; i < MT7530_NUM_PORTS ; i + + ) {
/* Disable forwarding by default on all ports */
mt7530_rmw ( priv , MT7530_PCR_P ( i ) , PCR_MATRIX_MASK ,
PCR_MATRIX_CLR ) ;
/* Disable learning by default on all ports */
mt7530_set ( priv , MT7530_PSC_P ( i ) , SA_DIS ) ;
mt7530_set ( priv , MT7531_DBG_CNT ( i ) , MT7531_DIS_CLR ) ;
if ( dsa_is_cpu_port ( ds , i ) ) {
ret = mt753x_cpu_port_enable ( ds , i ) ;
if ( ret )
return ret ;
} else {
mt7530_port_disable ( ds , i ) ;
/* Set default PVID to 0 on all user ports */
mt7530_rmw ( priv , MT7530_PPBV1_P ( i ) , G0_PORT_VID_MASK ,
G0_PORT_VID_DEF ) ;
}
/* Enable consistent egress tag */
mt7530_rmw ( priv , MT7530_PVC_P ( i ) , PVC_EG_TAG_MASK ,
PVC_EG_TAG ( MT7530_VLAN_EG_CONSISTENT ) ) ;
}
/* Flush the FDB table */
ret = mt7530_fdb_cmd ( priv , MT7530_FDB_FLUSH , NULL ) ;
if ( ret < 0 )
return ret ;
return 0 ;
}
2020-09-11 21:48:54 +08:00
static int
mt7531_setup ( struct dsa_switch * ds )
{
struct mt7530_priv * priv = ds - > priv ;
struct mt7530_dummy_poll p ;
u32 val , id ;
int ret , i ;
/* Reset the whole chip through the GPIO pin or memory-mapped registers,
* depending on the type of hardware
*/
if ( priv - > mcm ) {
reset_control_assert ( priv - > rstc ) ;
usleep_range ( 1000 , 1100 ) ;
reset_control_deassert ( priv - > rstc ) ;
} else {
gpiod_set_value_cansleep ( priv - > reset , 0 ) ;
usleep_range ( 1000 , 1100 ) ;
gpiod_set_value_cansleep ( priv - > reset , 1 ) ;
}
/* Wait for the switch to become stable after reset */
INIT_MT7530_DUMMY_POLL ( & p , priv , MT7530_HWTRAP ) ;
ret = readx_poll_timeout ( _mt7530_read , & p , val , val ! = 0 ,
20 , 1000000 ) ;
if ( ret < 0 ) {
dev_err ( priv - > dev , " reset timeout \n " ) ;
return ret ;
}
id = mt7530_read ( priv , MT7531_CREV ) ;
id > > = CHIP_NAME_SHIFT ;
if ( id ! = MT7531_ID ) {
dev_err ( priv - > dev , " chip %x can't be supported \n " , id ) ;
return - ENODEV ;
}
2022-09-17 02:07:34 +02:00
/* All MACs must be forced link-down before the software reset */
for ( i = 0 ; i < MT7530_NUM_PORTS ; i + + )
mt7530_write ( priv , MT7530_PMCR_P ( i ) , MT7531_FORCE_LNK ) ;
2020-09-11 21:48:54 +08:00
/* Reset the switch through internal reset */
mt7530_write ( priv , MT7530_SYS_CTRL ,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST ) ;
2022-09-17 02:07:33 +02:00
mt7531_pll_setup ( priv ) ;
2020-09-11 21:48:54 +08:00
if ( mt7531_dual_sgmii_supported ( priv ) ) {
priv - > p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII ;
/* Allow ds->slave_mii_bus to access the external PHY. */
mt7530_rmw ( priv , MT7531_GPIO_MODE1 , MT7531_GPIO11_RG_RXD2_MASK ,
MT7531_EXT_P_MDC_11 ) ;
mt7530_rmw ( priv , MT7531_GPIO_MODE1 , MT7531_GPIO12_RG_RXD3_MASK ,
MT7531_EXT_P_MDIO_12 ) ;
} else {
priv - > p5_intf_sel = P5_INTF_SEL_GMAC5 ;
}
dev_dbg ( ds - > dev , " P5 support %s interface \n " ,
p5_intf_modes ( priv - > p5_intf_sel ) ) ;
mt7530_rmw ( priv , MT7531_GPIO_MODE0 , MT7531_GPIO0_MASK ,
MT7531_GPIO0_INTERRUPT ) ;
/* Let phylink decide the interface later. */
priv - > p5_interface = PHY_INTERFACE_MODE_NA ;
priv - > p6_interface = PHY_INTERFACE_MODE_NA ;
/* Enable the PHY core PLL. Since no phy_device has been created yet
* and phy_[read,write]_mmd_indirect is not available, use the driver's
* own mt7531_ind_c45_phy_[read,write] helpers for this access.
*/
val = mt7531_ind_c45_phy_read ( priv , MT753X_CTRL_PHY_ADDR ,
MDIO_MMD_VEND2 , CORE_PLL_GROUP4 ) ;
val | = MT7531_PHY_PLL_BYPASS_MODE ;
val & = ~ MT7531_PHY_PLL_OFF ;
mt7531_ind_c45_phy_write ( priv , MT753X_CTRL_PHY_ADDR , MDIO_MMD_VEND2 ,
CORE_PLL_GROUP4 , val ) ;
2023-04-03 02:19:02 +01:00
mt7531_setup_common ( ds ) ;
2020-09-11 21:48:54 +08:00
2021-08-25 00:52:52 +08:00
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0 ( priv ) ;
if ( ret )
return ret ;
net: dsa: mt7530: enable assisted learning on CPU port
2021-08-04 00:04:01 +08:00
ds - > assisted_learning_on_cpu_port = true ;
2020-12-11 01:03:22 +08:00
ds - > mtu_enforcement_ingress = true ;
2020-09-11 21:48:54 +08:00
return 0 ;
}
2022-04-11 10:46:01 +01:00
static void mt7530_mac_port_get_caps ( struct dsa_switch * ds , int port ,
struct phylink_config * config )
{
switch ( port ) {
case 0 . . . 4 : /* Internal phy */
__set_bit ( PHY_INTERFACE_MODE_GMII ,
config - > supported_interfaces ) ;
break ;
case 5 : /* 2nd cpu port with phy of port 0 or 4 / external phy */
phy_interface_set_rgmii ( config - > supported_interfaces ) ;
__set_bit ( PHY_INTERFACE_MODE_MII ,
config - > supported_interfaces ) ;
__set_bit ( PHY_INTERFACE_MODE_GMII ,
config - > supported_interfaces ) ;
break ;
case 6 : /* 1st cpu port */
__set_bit ( PHY_INTERFACE_MODE_RGMII ,
config - > supported_interfaces ) ;
__set_bit ( PHY_INTERFACE_MODE_TRGMII ,
config - > supported_interfaces ) ;
break ;
}
}
2020-09-11 21:48:54 +08:00
static bool mt7531_is_rgmii_port ( struct mt7530_priv * priv , u32 port )
{
return ( port = = 5 ) & & ( priv - > p5_intf_sel ! = P5_INTF_SEL_GMAC5_SGMII ) ;
}
2022-04-11 10:46:01 +01:00
static void mt7531_mac_port_get_caps ( struct dsa_switch * ds , int port ,
struct phylink_config * config )
{
struct mt7530_priv * priv = ds - > priv ;
switch ( port ) {
case 0 . . . 4 : /* Internal phy */
__set_bit ( PHY_INTERFACE_MODE_GMII ,
config - > supported_interfaces ) ;
break ;
case 5 : /* 2nd cpu port supports either rgmii or sgmii/8023z */
if ( mt7531_is_rgmii_port ( priv , port ) ) {
phy_interface_set_rgmii ( config - > supported_interfaces ) ;
break ;
}
fallthrough ;
case 6 : /* 1st cpu port supports sgmii/8023z only */
__set_bit ( PHY_INTERFACE_MODE_SGMII ,
config - > supported_interfaces ) ;
__set_bit ( PHY_INTERFACE_MODE_1000BASEX ,
config - > supported_interfaces ) ;
__set_bit ( PHY_INTERFACE_MODE_2500BASEX ,
config - > supported_interfaces ) ;
config - > mac_capabilities | = MAC_2500FD ;
break ;
}
}
2023-04-03 02:19:40 +01:00
static void mt7988_mac_port_get_caps ( struct dsa_switch * ds , int port ,
struct phylink_config * config )
{
phy_interface_zero ( config - > supported_interfaces ) ;
switch ( port ) {
case 0 . . . 4 : /* Internal phy */
__set_bit ( PHY_INTERFACE_MODE_INTERNAL ,
config - > supported_interfaces ) ;
break ;
case 6 :
__set_bit ( PHY_INTERFACE_MODE_INTERNAL ,
config - > supported_interfaces ) ;
config - > mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10000FD ;
}
}
2020-09-11 21:48:52 +08:00
static int
mt753x_pad_setup ( struct dsa_switch * ds , const struct phylink_link_state * state )
{
struct mt7530_priv * priv = ds - > priv ;
return priv - > info - > pad_setup ( ds , state - > interface ) ;
}
static int
mt7530_mac_config ( struct dsa_switch * ds , int port , unsigned int mode ,
phy_interface_t interface )
{
struct mt7530_priv * priv = ds - > priv ;
/* Only port 5 needs to be set up. */
if ( port ! = 5 )
return 0 ;
mt7530_setup_port5 ( priv - > ds , interface ) ;
return 0 ;
}
2020-09-11 21:48:54 +08:00
static int mt7531_rgmii_setup ( struct mt7530_priv * priv , u32 port ,
phy_interface_t interface ,
struct phy_device * phydev )
{
u32 val ;
if ( ! mt7531_is_rgmii_port ( priv , port ) ) {
dev_err ( priv - > dev , " RGMII mode is not available for port %d \n " ,
port ) ;
return - EINVAL ;
}
val = mt7530_read ( priv , MT7531_CLKGEN_CTRL ) ;
val | = GP_CLK_EN ;
val & = ~ GP_MODE_MASK ;
val | = GP_MODE ( MT7531_GP_MODE_RGMII ) ;
val & = ~ CLK_SKEW_IN_MASK ;
val | = CLK_SKEW_IN ( MT7531_CLK_SKEW_NO_CHG ) ;
val & = ~ CLK_SKEW_OUT_MASK ;
val | = CLK_SKEW_OUT ( MT7531_CLK_SKEW_NO_CHG ) ;
val | = TXCLK_NO_REVERSE | RXCLK_NO_DELAY ;
/* Do not adjust the RGMII delay when a vendor PHY driver is present. */
if ( ! phydev | | phy_driver_is_genphy ( phydev ) ) {
val & = ~ ( TXCLK_NO_REVERSE | RXCLK_NO_DELAY ) ;
switch ( interface ) {
case PHY_INTERFACE_MODE_RGMII :
val | = TXCLK_NO_REVERSE ;
val | = RXCLK_NO_DELAY ;
break ;
case PHY_INTERFACE_MODE_RGMII_RXID :
val | = TXCLK_NO_REVERSE ;
break ;
case PHY_INTERFACE_MODE_RGMII_TXID :
val | = RXCLK_NO_DELAY ;
break ;
case PHY_INTERFACE_MODE_RGMII_ID :
break ;
default :
return - EINVAL ;
}
}
mt7530_write ( priv , MT7531_CLKGEN_CTRL , val ) ;
return 0 ;
}
static bool mt753x_is_mac_port ( u32 port )
{
return ( port = = 5 | | port = = 6 ) ;
}
2023-04-03 02:19:40 +01:00
static int
mt7988_mac_config ( struct dsa_switch * ds , int port , unsigned int mode ,
phy_interface_t interface )
{
if ( dsa_is_cpu_port ( ds , port ) & &
interface = = PHY_INTERFACE_MODE_INTERNAL )
return 0 ;
return - EINVAL ;
}
2020-09-11 21:48:54 +08:00
static int
mt7531_mac_config ( struct dsa_switch * ds , int port , unsigned int mode ,
phy_interface_t interface )
{
struct mt7530_priv * priv = ds - > priv ;
struct phy_device * phydev ;
struct dsa_port * dp ;
if ( ! mt753x_is_mac_port ( port ) ) {
dev_err ( priv - > dev , " port %d is not a MAC port \n " , port ) ;
return - EINVAL ;
}
switch ( interface ) {
case PHY_INTERFACE_MODE_RGMII :
case PHY_INTERFACE_MODE_RGMII_ID :
case PHY_INTERFACE_MODE_RGMII_RXID :
case PHY_INTERFACE_MODE_RGMII_TXID :
dp = dsa_to_port ( ds , port ) ;
phydev = dp - > slave - > phydev ;
return mt7531_rgmii_setup ( priv , port , interface , phydev ) ;
case PHY_INTERFACE_MODE_SGMII :
case PHY_INTERFACE_MODE_NA :
case PHY_INTERFACE_MODE_1000BASEX :
case PHY_INTERFACE_MODE_2500BASEX :
2023-03-19 12:58:43 +00:00
/* handled in SGMII PCS driver */
return 0 ;
2020-09-11 21:48:54 +08:00
default :
return - EINVAL ;
}
return - EINVAL ;
}
2020-09-11 21:48:52 +08:00
static int
mt753x_mac_config ( struct dsa_switch * ds , int port , unsigned int mode ,
const struct phylink_link_state * state )
{
struct mt7530_priv * priv = ds - > priv ;
return priv - > info - > mac_port_config ( ds , port , mode , state - > interface ) ;
}
2022-04-11 10:46:27 +01:00
static struct phylink_pcs *
mt753x_phylink_mac_select_pcs ( struct dsa_switch * ds , int port ,
phy_interface_t interface )
{
struct mt7530_priv * priv = ds - > priv ;
switch ( interface ) {
case PHY_INTERFACE_MODE_TRGMII :
2023-03-19 12:58:43 +00:00
return & priv - > pcs [ port ] . pcs ;
2022-04-11 10:46:27 +01:00
case PHY_INTERFACE_MODE_SGMII :
case PHY_INTERFACE_MODE_1000BASEX :
case PHY_INTERFACE_MODE_2500BASEX :
2023-03-19 12:58:43 +00:00
return priv - > ports [ port ] . sgmii_pcs ;
2022-04-11 10:46:27 +01:00
default :
return NULL ;
}
}
2020-09-11 21:48:52 +08:00
static void
mt753x_phylink_mac_config ( struct dsa_switch * ds , int port , unsigned int mode ,
const struct phylink_link_state * state )
{
struct mt7530_priv * priv = ds - > priv ;
u32 mcr_cur , mcr_new ;
switch ( port ) {
case 0 . . . 4 : /* Internal phy */
2023-04-03 02:19:40 +01:00
if ( state - > interface ! = PHY_INTERFACE_MODE_GMII & &
state - > interface ! = PHY_INTERFACE_MODE_INTERNAL )
2020-09-11 21:48:52 +08:00
goto unsupported ;
break ;
case 5 : /* 2nd cpu port with phy of port 0 or 4 / external phy */
if ( priv - > p5_interface = = state - > interface )
break ;
if ( mt753x_mac_config ( ds , port , mode , state ) < 0 )
goto unsupported ;
2019-09-02 15:02:26 +02:00
2020-09-11 21:48:54 +08:00
if ( priv - > p5_intf_sel ! = P5_DISABLED )
priv - > p5_interface = state - > interface ;
2019-09-02 15:02:26 +02:00
break ;
2019-09-02 15:02:24 +02:00
case 6 : /* 1st cpu port */
if ( priv - > p6_interface = = state - > interface )
break ;
2020-09-11 21:48:52 +08:00
mt753x_pad_setup ( ds , state ) ;
2019-09-02 15:02:24 +02:00
2020-09-11 21:48:52 +08:00
if ( mt753x_mac_config ( ds , port , mode , state ) < 0 )
goto unsupported ;
2019-09-02 15:02:24 +02:00
priv - > p6_interface = state - > interface ;
break ;
default :
2020-09-11 21:48:52 +08:00
unsupported :
dev_err ( ds - > dev , " %s: unsupported %s port: %i \n " ,
__func__ , phy_modes ( state - > interface ) , port ) ;
2019-09-02 15:02:24 +02:00
return ;
}
mcr_cur = mt7530_read ( priv , MT7530_PMCR_P ( port ) ) ;
mcr_new = mcr_cur ;
2020-03-27 15:44:12 +01:00
mcr_new & = ~ PMCR_LINK_SETTINGS_MASK ;
2019-09-02 15:02:24 +02:00
mcr_new | = PMCR_IFG_XMIT ( 1 ) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
2020-09-11 21:48:54 +08:00
PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID ( priv - > id ) ;
2019-09-02 15:02:24 +02:00
2019-09-02 15:02:26 +02:00
/* Are we connected to an external PHY? */
if ( port = = 5 & & dsa_is_user_port ( ds , 5 ) )
mcr_new | = PMCR_EXT_PHY ;
2019-09-02 15:02:24 +02:00
if ( mcr_new ! = mcr_cur )
mt7530_write ( priv , MT7530_PMCR_P ( port ) , mcr_new ) ;
}
2020-09-11 21:48:54 +08:00
static void mt753x_phylink_mac_link_down ( struct dsa_switch * ds , int port ,
2019-09-02 15:02:24 +02:00
unsigned int mode ,
phy_interface_t interface )
{
struct mt7530_priv * priv = ds - > priv ;
2020-03-27 15:44:12 +01:00
mt7530_clear ( priv , MT7530_PMCR_P ( port ) , PMCR_LINK_SETTINGS_MASK ) ;
2019-09-02 15:02:24 +02:00
}
2022-04-11 10:46:27 +01:00
static void mt753x_phylink_pcs_link_up ( struct phylink_pcs * pcs ,
unsigned int mode ,
phy_interface_t interface ,
int speed , int duplex )
2020-09-11 21:48:54 +08:00
{
2022-04-11 10:46:27 +01:00
if ( pcs - > ops - > pcs_link_up )
pcs - > ops - > pcs_link_up ( pcs , mode , interface , speed , duplex ) ;
2020-09-11 21:48:54 +08:00
}
static void mt753x_phylink_mac_link_up ( struct dsa_switch * ds , int port ,
2019-09-02 15:02:24 +02:00
unsigned int mode ,
phy_interface_t interface ,
2020-02-26 10:23:46 +00:00
struct phy_device * phydev ,
int speed , int duplex ,
bool tx_pause , bool rx_pause )
2019-09-02 15:02:24 +02:00
{
struct mt7530_priv * priv = ds - > priv ;
2020-03-27 15:44:12 +01:00
u32 mcr ;
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK ;
2020-09-11 21:48:54 +08:00
/* MT753x MAC works in 1G full duplex mode for all up-clocked
* variants .
*/
2023-04-03 02:19:40 +01:00
if ( interface = = PHY_INTERFACE_MODE_INTERNAL | |
interface = = PHY_INTERFACE_MODE_TRGMII | |
2020-09-11 21:48:54 +08:00
( phy_interface_mode_is_8023z ( interface ) ) ) {
speed = SPEED_1000 ;
duplex = DUPLEX_FULL ;
}
2020-03-27 15:44:12 +01:00
switch ( speed ) {
case SPEED_1000 :
mcr | = PMCR_FORCE_SPEED_1000 ;
break ;
case SPEED_100 :
mcr | = PMCR_FORCE_SPEED_100 ;
break ;
}
if ( duplex = = DUPLEX_FULL ) {
mcr | = PMCR_FORCE_FDX ;
if ( tx_pause )
mcr | = PMCR_TX_FC_EN ;
if ( rx_pause )
mcr | = PMCR_RX_FC_EN ;
}
2019-09-02 15:02:24 +02:00
2022-01-23 23:22:41 +08:00
if ( mode = = MLO_AN_PHY & & phydev & & phy_init_eee ( phydev , false ) > = 0 ) {
2021-04-12 08:50:31 +02:00
switch ( speed ) {
case SPEED_1000 :
mcr | = PMCR_FORCE_EEE1G ;
break ;
case SPEED_100 :
mcr | = PMCR_FORCE_EEE100 ;
break ;
}
}
2020-03-27 15:44:12 +01:00
mt7530_set ( priv , MT7530_PMCR_P ( port ) , mcr ) ;
2019-09-02 15:02:24 +02:00
}
2020-09-11 21:48:54 +08:00
static int
mt7531_cpu_port_config ( struct dsa_switch * ds , int port )
{
struct mt7530_priv * priv = ds - > priv ;
phy_interface_t interface ;
int speed ;
2020-09-19 20:28:10 +01:00
int ret ;
2020-09-11 21:48:54 +08:00
switch ( port ) {
case 5 :
if ( mt7531_is_rgmii_port ( priv , port ) )
interface = PHY_INTERFACE_MODE_RGMII ;
else
interface = PHY_INTERFACE_MODE_2500BASEX ;
priv - > p5_interface = interface ;
break ;
case 6 :
interface = PHY_INTERFACE_MODE_2500BASEX ;
priv - > p6_interface = interface ;
break ;
2020-09-19 20:28:10 +01:00
default :
return - EINVAL ;
2020-09-11 21:48:54 +08:00
}
if ( interface = = PHY_INTERFACE_MODE_2500BASEX )
speed = SPEED_2500 ;
else
speed = SPEED_1000 ;
2020-09-19 20:28:10 +01:00
ret = mt7531_mac_config ( ds , port , MLO_AN_FIXED , interface ) ;
if ( ret )
return ret ;
2020-09-11 21:48:54 +08:00
mt7530_write ( priv , MT7530_PMCR_P ( port ) ,
PMCR_CPU_PORT_SETTING ( priv - > id ) ) ;
2022-04-11 10:46:27 +01:00
mt753x_phylink_pcs_link_up ( & priv - > pcs [ port ] . pcs , MLO_AN_FIXED ,
interface , speed , DUPLEX_FULL ) ;
2020-09-11 21:48:54 +08:00
mt753x_phylink_mac_link_up ( ds , port , MLO_AN_FIXED , interface , NULL ,
speed , DUPLEX_FULL , true , true ) ;
return 0 ;
}
2023-04-03 02:19:40 +01:00
static int
mt7988_cpu_port_config ( struct dsa_switch * ds , int port )
{
struct mt7530_priv * priv = ds - > priv ;
mt7530_write ( priv , MT7530_PMCR_P ( port ) ,
PMCR_CPU_PORT_SETTING ( priv - > id ) ) ;
mt753x_phylink_mac_link_up ( ds , port , MLO_AN_FIXED ,
PHY_INTERFACE_MODE_INTERNAL , NULL ,
SPEED_10000 , DUPLEX_FULL , true , true ) ;
return 0 ;
}
2022-04-11 10:46:01 +01:00
static void mt753x_phylink_get_caps ( struct dsa_switch * ds , int port ,
struct phylink_config * config )
{
struct mt7530_priv * priv = ds - > priv ;
/* This switch only supports full-duplex at 1Gbps */
config - > mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD ;
priv - > info - > mac_port_get_caps ( ds , port , config ) ;
}
2022-04-11 10:46:32 +01:00
static int mt753x_pcs_validate ( struct phylink_pcs * pcs ,
unsigned long * supported ,
const struct phylink_link_state * state )
2019-09-02 15:02:24 +02:00
{
2022-04-11 10:46:32 +01:00
/* Autonegotiation is not supported in TRGMII or 802.3z modes */
if ( state - > interface = = PHY_INTERFACE_MODE_TRGMII | |
phy_interface_mode_is_8023z ( state - > interface ) )
phylink_clear ( supported , Autoneg ) ;
2019-09-02 15:02:24 +02:00
2022-04-11 10:46:32 +01:00
return 0 ;
2019-09-02 15:02:24 +02:00
}
2022-04-11 10:46:27 +01:00
static void mt7530_pcs_get_state ( struct phylink_pcs * pcs ,
struct phylink_link_state * state )
2019-09-02 15:02:24 +02:00
{
2022-04-11 10:46:27 +01:00
struct mt7530_priv * priv = pcs_to_mt753x_pcs ( pcs ) - > priv ;
int port = pcs_to_mt753x_pcs ( pcs ) - > port ;
2019-09-02 15:02:24 +02:00
u32 pmsr ;
pmsr = mt7530_read ( priv , MT7530_PMSR_P ( port ) ) ;
state - > link = ( pmsr & PMSR_LINK ) ;
state - > an_complete = state - > link ;
state - > duplex = ! ! ( pmsr & PMSR_DPX ) ;
switch ( pmsr & PMSR_SPEED_MASK ) {
case PMSR_SPEED_10 :
state - > speed = SPEED_10 ;
break ;
case PMSR_SPEED_100 :
state - > speed = SPEED_100 ;
break ;
case PMSR_SPEED_1000 :
state - > speed = SPEED_1000 ;
break ;
default :
state - > speed = SPEED_UNKNOWN ;
break ;
}
state - > pause & = ~ ( MLO_PAUSE_RX | MLO_PAUSE_TX ) ;
if ( pmsr & PMSR_RX_FC )
state - > pause | = MLO_PAUSE_RX ;
if ( pmsr & PMSR_TX_FC )
state - > pause | = MLO_PAUSE_TX ;
}
2023-06-16 13:07:29 +01:00
static int mt753x_pcs_config ( struct phylink_pcs * pcs , unsigned int neg_mode ,
2022-04-11 10:46:27 +01:00
phy_interface_t interface ,
const unsigned long * advertising ,
bool permit_pause_to_mac )
2020-09-11 21:48:52 +08:00
{
2022-04-11 10:46:27 +01:00
return 0 ;
}
2020-09-11 21:48:52 +08:00
2022-04-11 10:46:27 +01:00
static void mt7530_pcs_an_restart ( struct phylink_pcs * pcs )
{
2020-09-11 21:48:52 +08:00
}
2022-04-11 10:46:27 +01:00
static const struct phylink_pcs_ops mt7530_pcs_ops = {
2022-04-11 10:46:32 +01:00
. pcs_validate = mt753x_pcs_validate ,
2022-04-11 10:46:27 +01:00
. pcs_get_state = mt7530_pcs_get_state ,
. pcs_config = mt753x_pcs_config ,
. pcs_an_restart = mt7530_pcs_an_restart ,
} ;
2020-09-11 21:48:52 +08:00
static int
mt753x_setup ( struct dsa_switch * ds )
{
struct mt7530_priv * priv = ds - > priv ;
net: dsa: mt753x: fix pcs conversion regression
Daniel Golle reports that the conversion of mt753x to phylink PCS caused
an oops as below.
The problem is with the placement of the PCS initialisation, which
occurs after mt7531_setup() has been called. However, burited in this
function is a call to setup the CPU port, which requires the PCS
structure to be already setup.
Fix this by changing the initialisation order.
Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020
Mem abort info:
ESR = 0x96000005
EC = 0x25: DABT (current EL), IL = 32 bits
SET = 0, FnV = 0
EA = 0, S1PTW = 0
FSC = 0x05: level 1 translation fault
Data abort info:
ISV = 0, ISS = 0x00000005
CM = 0, WnR = 0
user pgtable: 4k pages, 39-bit VAs, pgdp=0000000046057000
[0000000000000020] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 0 PID: 32 Comm: kworker/u4:1 Tainted: G S 5.18.0-rc3-next-20220422+ #0
Hardware name: Bananapi BPI-R64 (DT)
Workqueue: events_unbound deferred_probe_work_func
pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : mt7531_cpu_port_config+0xcc/0x1b0
lr : mt7531_cpu_port_config+0xc0/0x1b0
sp : ffffffc008d5b980
x29: ffffffc008d5b990 x28: ffffff80060562c8 x27: 00000000f805633b
x26: ffffff80001a8880 x25: 00000000000009c4 x24: 0000000000000016
x23: ffffff8005eb6470 x22: 0000000000003600 x21: ffffff8006948080
x20: 0000000000000000 x19: 0000000000000006 x18: 0000000000000000
x17: 0000000000000001 x16: 0000000000000001 x15: 02963607fcee069e
x14: 0000000000000000 x13: 0000000000000030 x12: 0101010101010101
x11: ffffffc037302000 x10: 0000000000000870 x9 : ffffffc008d5b800
x8 : ffffff800028f950 x7 : 0000000000000001 x6 : 00000000662b3000
x5 : 00000000000002f0 x4 : 0000000000000000 x3 : ffffff800028f080
x2 : 0000000000000000 x1 : ffffff800028f080 x0 : 0000000000000000
Call trace:
mt7531_cpu_port_config+0xcc/0x1b0
mt753x_cpu_port_enable+0x24/0x1f0
mt7531_setup+0x49c/0x5c0
mt753x_setup+0x20/0x31c
dsa_register_switch+0x8bc/0x1020
mt7530_probe+0x118/0x200
mdio_probe+0x30/0x64
really_probe.part.0+0x98/0x280
__driver_probe_device+0x94/0x140
driver_probe_device+0x40/0x114
__device_attach_driver+0xb0/0x10c
bus_for_each_drv+0x64/0xa0
__device_attach+0xa8/0x16c
device_initial_probe+0x10/0x20
bus_probe_device+0x94/0x9c
deferred_probe_work_func+0x80/0xb4
process_one_work+0x200/0x3a0
worker_thread+0x260/0x4c0
kthread+0xd4/0xe0
ret_from_fork+0x10/0x20
Code: 9409e911 937b7e60 8b0002a0 f9405800 (f9401005)
---[ end trace 0000000000000000 ]---
Reported-by: Daniel Golle <daniel@makrotopia.org>
Tested-by: Daniel Golle <daniel@makrotopia.org>
Fixes: cbd1f243bc41 ("net: dsa: mt7530: partially convert to phylink_pcs")
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Link: https://lore.kernel.org/r/E1nj6FW-007WZB-5Y@rmk-PC.armlinux.org.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-04-25 22:28:02 +01:00
int i , ret ;
/* Initialise the PCS devices */
for ( i = 0 ; i < priv - > ds - > num_ports ; i + + ) {
priv - > pcs [ i ] . pcs . ops = priv - > info - > pcs_ops ;
2023-06-16 13:07:29 +01:00
priv - > pcs [ i ] . pcs . neg_mode = true ;
net: dsa: mt753x: fix pcs conversion regression
2022-04-25 22:28:02 +01:00
priv - > pcs [ i ] . priv = priv ;
priv - > pcs [ i ] . port = i ;
}
2020-09-11 21:48:52 +08:00
net: dsa: mt753x: fix pcs conversion regression
2022-04-25 22:28:02 +01:00
ret = priv - > info - > sw_setup ( ds ) ;
2021-05-19 11:32:00 +08:00
if ( ret )
return ret ;
2020-09-11 21:48:52 +08:00
2021-05-19 11:32:00 +08:00
ret = mt7530_setup_irq ( priv ) ;
if ( ret )
return ret ;
2020-09-11 21:48:52 +08:00
2021-05-19 11:32:00 +08:00
ret = mt7530_setup_mdio ( priv ) ;
if ( ret & & priv - > irq )
mt7530_free_irq_common ( priv ) ;
2020-09-11 21:48:52 +08:00
2023-04-16 13:08:14 +01:00
if ( priv - > create_sgmii ) {
ret = priv - > create_sgmii ( priv , mt7531_dual_sgmii_supported ( priv ) ) ;
if ( ret & & priv - > irq )
mt7530_free_irq ( priv ) ;
}
2021-05-19 11:32:00 +08:00
return ret ;
2020-09-11 21:48:52 +08:00
}
2021-04-12 08:50:31 +02:00
static int mt753x_get_mac_eee ( struct dsa_switch * ds , int port ,
struct ethtool_eee * e )
{
struct mt7530_priv * priv = ds - > priv ;
u32 eeecr = mt7530_read ( priv , MT7530_PMEEECR_P ( port ) ) ;
e - > tx_lpi_enabled = ! ( eeecr & LPI_MODE_EN ) ;
e - > tx_lpi_timer = GET_LPI_THRESH ( eeecr ) ;
return 0 ;
}
static int mt753x_set_mac_eee ( struct dsa_switch * ds , int port ,
struct ethtool_eee * e )
{
struct mt7530_priv * priv = ds - > priv ;
u32 set , mask = LPI_THRESH_MASK | LPI_MODE_EN ;
if ( e - > tx_lpi_timer > 0xFFF )
return - EINVAL ;
set = SET_LPI_THRESH ( e - > tx_lpi_timer ) ;
if ( ! e - > tx_lpi_enabled )
/* Force LPI Mode without a delay */
set | = LPI_MODE_EN ;
mt7530_rmw ( priv , MT7530_PMEEECR_P ( port ) , mask , set ) ;
return 0 ;
}
2023-04-03 02:19:40 +01:00
static int mt7988_pad_setup ( struct dsa_switch * ds , phy_interface_t interface )
{
return 0 ;
}
static int mt7988_setup ( struct dsa_switch * ds )
{
struct mt7530_priv * priv = ds - > priv ;
/* Reset the switch */
reset_control_assert ( priv - > rstc ) ;
usleep_range ( 20 , 50 ) ;
reset_control_deassert ( priv - > rstc ) ;
usleep_range ( 20 , 50 ) ;
/* Reset the switch PHYs */
mt7530_write ( priv , MT7530_SYS_CTRL , SYS_CTRL_PHY_RST ) ;
return mt7531_setup_common ( ds ) ;
}
2023-04-03 02:19:13 +01:00
const struct dsa_switch_ops mt7530_switch_ops = {
2017-04-07 16:45:09 +08:00
. get_tag_protocol = mtk_get_tag_protocol ,
2020-09-11 21:48:52 +08:00
. setup = mt753x_setup ,
net: dsa: introduce preferred_default_local_cpu_port and use on MT7530
Since the introduction of the OF bindings, DSA has always had a policy that
in case multiple CPU ports are present in the device tree, the numerically
smallest one is always chosen.
The MT7530 switch family, except the switch on the MT7988 SoC, has 2 CPU
ports, 5 and 6, where port 6 is preferable on the MT7531BE switch because
it has higher bandwidth.
The MT7530 driver developers had 3 options:
- to modify DSA when the MT7531 switch support was introduced, such as to
prefer the better port
- to declare both CPU ports in device trees as CPU ports, and live with the
sub-optimal performance resulting from not preferring the better port
- to declare just port 6 in the device tree as a CPU port
Of course they chose the path of least resistance (3rd option), kicking the
can down the road. The hardware description in the device tree is supposed
to be stable - developers are not supposed to adopt the strategy of
piecemeal hardware description, where the device tree is updated in
lockstep with the features that the kernel currently supports.
Now, as a result of the fact that they did that, any attempts to modify the
device tree and describe both CPU ports as CPU ports would make DSA change
its default selection from port 6 to 5, effectively resulting in a
performance degradation visible to users with the MT7531BE switch as can be
seen below.
Without preferring port 6:
[ ID][Role] Interval Transfer Bitrate Retr
[ 5][TX-C] 0.00-20.00 sec 374 MBytes 157 Mbits/sec 734 sender
[ 5][TX-C] 0.00-20.00 sec 373 MBytes 156 Mbits/sec receiver
[ 7][RX-C] 0.00-20.00 sec 1.81 GBytes 778 Mbits/sec 0 sender
[ 7][RX-C] 0.00-20.00 sec 1.81 GBytes 777 Mbits/sec receiver
With preferring port 6:
[ ID][Role] Interval Transfer Bitrate Retr
[ 5][TX-C] 0.00-20.00 sec 1.99 GBytes 856 Mbits/sec 273 sender
[ 5][TX-C] 0.00-20.00 sec 1.99 GBytes 855 Mbits/sec receiver
[ 7][RX-C] 0.00-20.00 sec 1.72 GBytes 737 Mbits/sec 15 sender
[ 7][RX-C] 0.00-20.00 sec 1.71 GBytes 736 Mbits/sec receiver
Using one port for WAN and the other ports for LAN is a very popular use
case which is what this test emulates.
As such, this change proposes that we retroactively modify stable kernels
(which don't support the modification of the CPU port assignments, so as to
let user space fix the problem and restore the throughput) to keep the
mt7530 driver preferring port 6 even with device trees where the hardware
is more fully described.
Fixes: c288575f7810 ("net: dsa: mt7530: Add the support of MT7531 switch")
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
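Only the hook's name appears in the ops table below; a hedged sketch of an implementation matching the behaviour argued for above could be as simple as:

/* Sketch: always prefer port 6, the higher-bandwidth CPU port on the
 * MT7531BE, regardless of its numerical order in the device tree.
 */
static struct dsa_port *
mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	return dsa_to_port(ds, 6);
}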
2023-06-17 09:26:48 +03:00
. preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port ,
2017-04-07 16:45:09 +08:00
. get_strings = mt7530_get_strings ,
. get_ethtool_stats = mt7530_get_ethtool_stats ,
. get_sset_count = mt7530_get_sset_count ,
2020-12-08 15:00:28 +08:00
. set_ageing_time = mt7530_set_ageing_time ,
2017-04-07 16:45:09 +08:00
. port_enable = mt7530_port_enable ,
. port_disable = mt7530_port_disable ,
2020-11-03 13:06:18 +08:00
. port_change_mtu = mt7530_port_change_mtu ,
. port_max_mtu = mt7530_port_max_mtu ,
2017-04-07 16:45:09 +08:00
. port_stp_state_set = mt7530_stp_state_set ,
2021-03-16 01:09:40 +08:00
. port_pre_bridge_flags = mt7530_port_pre_bridge_flags ,
. port_bridge_flags = mt7530_port_bridge_flags ,
2017-04-07 16:45:09 +08:00
. port_bridge_join = mt7530_port_bridge_join ,
. port_bridge_leave = mt7530_port_bridge_leave ,
. port_fdb_add = mt7530_port_fdb_add ,
. port_fdb_del = mt7530_port_fdb_del ,
. port_fdb_dump = mt7530_port_fdb_dump ,
2021-03-16 01:09:40 +08:00
. port_mdb_add = mt7530_port_mdb_add ,
. port_mdb_del = mt7530_port_mdb_del ,
2017-12-15 12:47:00 +08:00
. port_vlan_filtering = mt7530_port_vlan_filtering ,
. port_vlan_add = mt7530_port_vlan_add ,
. port_vlan_del = mt7530_port_vlan_del ,
2020-09-11 21:48:54 +08:00
. port_mirror_add = mt753x_port_mirror_add ,
. port_mirror_del = mt753x_port_mirror_del ,
2022-04-11 10:46:01 +01:00
. phylink_get_caps = mt753x_phylink_get_caps ,
2022-04-11 10:46:27 +01:00
. phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs ,
2020-09-11 21:48:52 +08:00
. phylink_mac_config = mt753x_phylink_mac_config ,
2020-09-11 21:48:54 +08:00
. phylink_mac_link_down = mt753x_phylink_mac_link_down ,
. phylink_mac_link_up = mt753x_phylink_mac_link_up ,
2021-04-12 08:50:31 +02:00
. get_mac_eee = mt753x_get_mac_eee ,
. set_mac_eee = mt753x_set_mac_eee ,
2017-04-07 16:45:09 +08:00
} ;
2023-04-03 02:19:13 +01:00
EXPORT_SYMBOL_GPL ( mt7530_switch_ops ) ;
2017-04-07 16:45:09 +08:00
2023-04-03 02:19:13 +01:00
const struct mt753x_info mt753x_table [ ] = {
2020-09-11 21:48:52 +08:00
[ ID_MT7621 ] = {
. id = ID_MT7621 ,
2022-04-11 10:46:27 +01:00
. pcs_ops = & mt7530_pcs_ops ,
2020-09-11 21:48:52 +08:00
. sw_setup = mt7530_setup ,
2023-01-17 00:52:16 +01:00
. phy_read_c22 = mt7530_phy_read_c22 ,
. phy_write_c22 = mt7530_phy_write_c22 ,
. phy_read_c45 = mt7530_phy_read_c45 ,
. phy_write_c45 = mt7530_phy_write_c45 ,
2020-09-11 21:48:52 +08:00
. pad_setup = mt7530_pad_clk_setup ,
2022-04-11 10:46:01 +01:00
. mac_port_get_caps = mt7530_mac_port_get_caps ,
2020-09-11 21:48:52 +08:00
. mac_port_config = mt7530_mac_config ,
} ,
[ ID_MT7530 ] = {
. id = ID_MT7530 ,
2022-04-11 10:46:27 +01:00
. pcs_ops = & mt7530_pcs_ops ,
2020-09-11 21:48:52 +08:00
. sw_setup = mt7530_setup ,
2023-01-17 00:52:16 +01:00
. phy_read_c22 = mt7530_phy_read_c22 ,
. phy_write_c22 = mt7530_phy_write_c22 ,
. phy_read_c45 = mt7530_phy_read_c45 ,
. phy_write_c45 = mt7530_phy_write_c45 ,
2020-09-11 21:48:52 +08:00
. pad_setup = mt7530_pad_clk_setup ,
2022-04-11 10:46:01 +01:00
. mac_port_get_caps = mt7530_mac_port_get_caps ,
2020-09-11 21:48:52 +08:00
. mac_port_config = mt7530_mac_config ,
} ,
2020-09-11 21:48:54 +08:00
[ ID_MT7531 ] = {
. id = ID_MT7531 ,
2023-03-19 12:58:43 +00:00
. pcs_ops = & mt7530_pcs_ops ,
2020-09-11 21:48:54 +08:00
. sw_setup = mt7531_setup ,
2023-01-17 00:52:16 +01:00
. phy_read_c22 = mt7531_ind_c22_phy_read ,
. phy_write_c22 = mt7531_ind_c22_phy_write ,
. phy_read_c45 = mt7531_ind_c45_phy_read ,
. phy_write_c45 = mt7531_ind_c45_phy_write ,
2020-09-11 21:48:54 +08:00
. pad_setup = mt7531_pad_setup ,
. cpu_port_config = mt7531_cpu_port_config ,
2022-04-11 10:46:01 +01:00
. mac_port_get_caps = mt7531_mac_port_get_caps ,
2020-09-11 21:48:54 +08:00
. mac_port_config = mt7531_mac_config ,
} ,
2023-04-03 02:19:40 +01:00
[ ID_MT7988 ] = {
. id = ID_MT7988 ,
. pcs_ops = & mt7530_pcs_ops ,
. sw_setup = mt7988_setup ,
. phy_read_c22 = mt7531_ind_c22_phy_read ,
. phy_write_c22 = mt7531_ind_c22_phy_write ,
. phy_read_c45 = mt7531_ind_c45_phy_read ,
. phy_write_c45 = mt7531_ind_c45_phy_write ,
. pad_setup = mt7988_pad_setup ,
. cpu_port_config = mt7988_cpu_port_config ,
. mac_port_get_caps = mt7988_mac_port_get_caps ,
. mac_port_config = mt7988_mac_config ,
} ,
2020-09-11 21:48:52 +08:00
} ;
2023-04-03 02:19:13 +01:00
EXPORT_SYMBOL_GPL ( mt753x_table ) ;
2020-09-11 21:48:52 +08:00
2023-04-03 02:19:13 +01:00
int
2023-04-03 02:18:39 +01:00
mt7530_probe_common ( struct mt7530_priv * priv )
2017-04-07 16:45:09 +08:00
{
2023-04-03 02:18:39 +01:00
struct device * dev = priv - > dev ;
2017-04-07 16:45:09 +08:00
2023-04-03 02:18:39 +01:00
priv - > ds = devm_kzalloc ( dev , sizeof ( * priv - > ds ) , GFP_KERNEL ) ;
2017-04-07 16:45:09 +08:00
if ( ! priv - > ds )
return - ENOMEM ;
2023-04-03 02:18:39 +01:00
priv - > ds - > dev = dev ;
2021-10-16 14:24:14 +08:00
priv - > ds - > num_ports = MT7530_NUM_PORTS ;
2019-10-21 16:51:30 -04:00
2019-01-30 11:24:05 +10:00
/* Get the hardware identifier from the devicetree node.
* We will need it for some of the clock and regulator setup .
*/
2023-04-03 02:18:39 +01:00
priv - > info = of_device_get_match_data ( dev ) ;
2020-09-11 21:48:52 +08:00
if ( ! priv - > info )
return - EINVAL ;
/* Sanity-check that the required device operations are filled in
* properly.
*/
if ( ! priv - > info - > sw_setup | | ! priv - > info - > pad_setup | |
2023-01-17 00:52:16 +01:00
! priv - > info - > phy_read_c22 | | ! priv - > info - > phy_write_c22 | |
2022-04-11 10:46:01 +01:00
! priv - > info - > mac_port_get_caps | |
2022-04-11 10:46:27 +01:00
! priv - > info - > mac_port_config )
2020-09-11 21:48:52 +08:00
return - EINVAL ;
priv - > id = priv - > info - > id ;
2023-04-03 02:18:39 +01:00
priv - > dev = dev ;
priv - > ds - > priv = priv ;
priv - > ds - > ops = & mt7530_switch_ops ;
mutex_init ( & priv - > reg_mutex ) ;
dev_set_drvdata ( dev , priv ) ;
2017-04-07 16:45:09 +08:00
2023-04-03 02:18:39 +01:00
return 0 ;
}
2023-04-03 02:19:13 +01:00
EXPORT_SYMBOL_GPL ( mt7530_probe_common ) ;
2019-01-30 11:24:05 +10:00
2023-04-03 02:19:13 +01:00
void
2023-04-03 02:18:50 +01:00
mt7530_remove_common ( struct mt7530_priv * priv )
{
if ( priv - > irq )
mt7530_free_irq ( priv ) ;
dsa_unregister_switch ( priv - > ds ) ;
mutex_destroy ( & priv - > reg_mutex ) ;
}
2023-04-03 02:19:13 +01:00
EXPORT_SYMBOL_GPL ( mt7530_remove_common ) ;
2017-04-07 16:45:09 +08:00
MODULE_AUTHOR ( " Sean Wang <sean.wang@mediatek.com> " ) ;
MODULE_DESCRIPTION ( " Driver for Mediatek MT7530 Switch " ) ;
MODULE_LICENSE ( " GPL " ) ;