// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017 - 2019 Microchip Technology Inc.
 */
# include <linux/kernel.h>
# include <linux/module.h>
2019-02-23 03:36:48 +03:00
# include <linux/iopoll.h>
2018-11-21 02:55:09 +03:00
# include <linux/platform_data/microchip-ksz.h>
# include <linux/phy.h>
# include <linux/if_bridge.h>
2022-03-08 16:58:57 +03:00
# include <linux/if_vlan.h>
2018-11-21 02:55:09 +03:00
# include <net/dsa.h>
# include <net/switchdev.h>
2018-11-21 02:55:10 +03:00
# include "ksz9477_reg.h"
2019-02-23 03:36:48 +03:00
# include "ksz_common.h"
2022-06-22 12:04:23 +03:00
# include "ksz9477.h"
2018-11-21 02:55:09 +03:00
2019-03-01 06:57:24 +03:00
/* Used with variable features to indicate capabilities. */
# define GBIT_SUPPORT BIT(0)
# define NEW_XMII BIT(1)
# define IS_9893 BIT(2)
2019-06-26 02:43:42 +03:00
/* ksz_cfg - set or clear @bits in an 8-bit global switch register.
 * @dev: switch device
 * @addr: global register address
 * @bits: mask of bits to modify
 * @set: true to set the bits, false to clear them
 */
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
}
/* ksz_port_cfg - set or clear @bits in an 8-bit per-port register.
 * @dev: switch device
 * @port: port index used to compute the register address
 * @offset: register offset within the port's register block
 * @bits: mask of bits to modify
 * @set: true to set the bits, false to clear them
 */
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}
2018-11-21 02:55:09 +03:00
static void ksz9477_cfg32 ( struct ksz_device * dev , u32 addr , u32 bits , bool set )
{
2019-06-26 02:43:48 +03:00
regmap_update_bits ( dev - > regmap [ 2 ] , addr , bits , set ? bits : 0 ) ;
2018-11-21 02:55:09 +03:00
}
/* ksz9477_port_cfg32 - set or clear @bits in a 32-bit per-port register. */
static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}
2022-06-22 12:04:23 +03:00
int ksz9477_change_mtu ( struct ksz_device * dev , int port , int mtu )
2022-03-08 16:58:57 +03:00
{
u16 frame_size , max_frame = 0 ;
int i ;
frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN ;
/* Cache the per-port MTU setting */
dev - > ports [ port ] . max_frame = frame_size ;
2022-05-17 12:43:26 +03:00
for ( i = 0 ; i < dev - > info - > port_cnt ; i + + )
2022-03-08 16:58:57 +03:00
max_frame = max ( max_frame , dev - > ports [ i ] . max_frame ) ;
return regmap_update_bits ( dev - > regmap [ 1 ] , REG_SW_MTU__2 ,
REG_SW_MTU_MASK , max_frame ) ;
}
2022-06-22 12:04:23 +03:00
int ksz9477_max_mtu ( struct ksz_device * dev , int port )
2022-03-08 16:58:57 +03:00
{
return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN ;
}
2019-06-28 00:55:53 +03:00
static int ksz9477_wait_vlan_ctrl_ready ( struct ksz_device * dev )
2018-11-21 02:55:09 +03:00
{
2019-06-28 00:55:53 +03:00
unsigned int val ;
2018-11-21 02:55:09 +03:00
2019-06-28 00:55:53 +03:00
return regmap_read_poll_timeout ( dev - > regmap [ 0 ] , REG_SW_VLAN_CTRL ,
val , ! ( val & VLAN_START ) , 10 , 1000 ) ;
2018-11-21 02:55:09 +03:00
}
/* ksz9477_get_vlan_table - read one VLAN table entry from hardware.
 * @dev: switch device
 * @vid: VLAN id selecting the table entry
 * @vlan_table: output; three 32-bit words (entry, untag map, port map)
 *
 * Serialized by dev->vlan_mutex since the index/control registers are
 * shared.  Returns 0 on success or a negative error if the table engine
 * did not become ready.
 */
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}
/* ksz9477_set_vlan_table - write one VLAN table entry to hardware.
 * @dev: switch device
 * @vid: VLAN id selecting the table entry
 * @vlan_table: three 32-bit words (entry, untag map, port map) to program
 *
 * On success the in-memory vlan_cache for @vid is updated to match.
 * Serialized by dev->vlan_mutex.  Returns 0 or a negative error.
 */
static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}
/* ksz9477_read_table - read the four ALU data words into @table. */
static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}
/* ksz9477_write_table - write the four ALU data words from @table. */
static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}
2019-06-28 00:55:54 +03:00
static int ksz9477_wait_alu_ready ( struct ksz_device * dev )
2018-11-21 02:55:09 +03:00
{
2019-06-28 00:55:54 +03:00
unsigned int val ;
2018-11-21 02:55:09 +03:00
2019-06-28 00:55:54 +03:00
return regmap_read_poll_timeout ( dev - > regmap [ 2 ] , REG_SW_ALU_CTRL__4 ,
val , ! ( val & ALU_START ) , 10 , 1000 ) ;
2018-11-21 02:55:09 +03:00
}
2019-06-28 00:55:55 +03:00
static int ksz9477_wait_alu_sta_ready ( struct ksz_device * dev )
2018-11-21 02:55:09 +03:00
{
2019-06-28 00:55:55 +03:00
unsigned int val ;
2018-11-21 02:55:09 +03:00
2019-06-28 00:55:55 +03:00
return regmap_read_poll_timeout ( dev - > regmap [ 2 ] ,
REG_SW_ALU_STAT_CTRL__4 ,
val , ! ( val & ALU_STAT_START ) ,
10 , 1000 ) ;
2018-11-21 02:55:09 +03:00
}
2022-06-22 12:04:23 +03:00
int ksz9477_reset_switch ( struct ksz_device * dev )
2018-11-21 02:55:09 +03:00
{
u8 data8 ;
u32 data32 ;
/* reset switch */
ksz_cfg ( dev , REG_SW_OPERATION , SW_RESET , true ) ;
/* turn off SPI DO Edge select */
2019-06-28 00:55:56 +03:00
regmap_update_bits ( dev - > regmap [ 0 ] , REG_SW_GLOBAL_SERIAL_CTRL_0 ,
SPI_AUTO_EDGE_DETECTION , 0 ) ;
2018-11-21 02:55:09 +03:00
/* default configuration */
ksz_read8 ( dev , REG_SW_LUE_CTRL_1 , & data8 ) ;
data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE ;
ksz_write8 ( dev , REG_SW_LUE_CTRL_1 , data8 ) ;
/* disable interrupts */
ksz_write32 ( dev , REG_SW_INT_MASK__4 , SWITCH_INT_MASK ) ;
ksz_write32 ( dev , REG_SW_PORT_INT_MASK__4 , 0x7F ) ;
ksz_read32 ( dev , REG_SW_PORT_INT_STATUS__4 , & data32 ) ;
2022-01-27 19:41:56 +03:00
data8 = SW_ENABLE_REFCLKO ;
if ( dev - > synclko_disable )
data8 = 0 ;
else if ( dev - > synclko_125 )
data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ ;
ksz_write8 ( dev , REG_SW_GLOBAL_OUTPUT_CTRL__1 , data8 ) ;
2019-06-12 23:49:06 +03:00
2018-11-21 02:55:09 +03:00
return 0 ;
}
2022-06-22 12:04:23 +03:00
void ksz9477_r_mib_cnt ( struct ksz_device * dev , int port , u16 addr , u64 * cnt )
2019-02-23 03:36:48 +03:00
{
struct ksz_port * p = & dev - > ports [ port ] ;
2019-06-28 00:55:52 +03:00
unsigned int val ;
2019-02-23 03:36:48 +03:00
u32 data ;
int ret ;
/* retain the flush/freeze bit */
data = p - > freeze ? MIB_COUNTER_FLUSH_FREEZE : 0 ;
data | = MIB_COUNTER_READ ;
data | = ( addr < < MIB_COUNTER_INDEX_S ) ;
ksz_pwrite32 ( dev , port , REG_PORT_MIB_CTRL_STAT__4 , data ) ;
2019-06-28 00:55:52 +03:00
ret = regmap_read_poll_timeout ( dev - > regmap [ 2 ] ,
PORT_CTRL_ADDR ( port , REG_PORT_MIB_CTRL_STAT__4 ) ,
val , ! ( val & MIB_COUNTER_READ ) , 10 , 1000 ) ;
2019-02-23 03:36:48 +03:00
/* failed to read MIB. get out of loop */
2019-06-28 00:55:52 +03:00
if ( ret ) {
2019-02-23 03:36:48 +03:00
dev_dbg ( dev - > dev , " Failed to get MIB \n " ) ;
return ;
}
/* count resets upon read */
ksz_pread32 ( dev , port , REG_PORT_MIB_DATA , & data ) ;
* cnt + = data ;
}
2022-06-22 12:04:23 +03:00
void ksz9477_r_mib_pkt ( struct ksz_device * dev , int port , u16 addr ,
u64 * dropped , u64 * cnt )
2019-02-23 03:36:48 +03:00
{
2022-05-17 12:43:28 +03:00
addr = dev - > info - > mib_names [ addr ] . index ;
2019-02-23 03:36:48 +03:00
ksz9477_r_mib_cnt ( dev , port , addr , cnt ) ;
}
2022-06-22 12:04:23 +03:00
void ksz9477_freeze_mib ( struct ksz_device * dev , int port , bool freeze )
2019-02-23 03:36:48 +03:00
{
u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0 ;
struct ksz_port * p = & dev - > ports [ port ] ;
/* enable/disable the port for flush/freeze function */
mutex_lock ( & p - > mib . cnt_mutex ) ;
ksz_pwrite32 ( dev , port , REG_PORT_MIB_CTRL_STAT__4 , val ) ;
/* used by MIB counter reading code to know freeze is enabled */
p - > freeze = freeze ;
mutex_unlock ( & p - > mib . cnt_mutex ) ;
}
2022-06-22 12:04:23 +03:00
void ksz9477_port_init_cnt ( struct ksz_device * dev , int port )
2019-02-23 03:36:48 +03:00
{
struct ksz_port_mib * mib = & dev - > ports [ port ] . mib ;
/* flush all enabled port MIB counters */
mutex_lock ( & mib - > cnt_mutex ) ;
ksz_pwrite32 ( dev , port , REG_PORT_MIB_CTRL_STAT__4 ,
MIB_COUNTER_FLUSH_FREEZE ) ;
ksz_write8 ( dev , REG_SW_MAC_CTRL_6 , SW_MIB_COUNTER_FLUSH ) ;
ksz_pwrite32 ( dev , port , REG_PORT_MIB_CTRL_STAT__4 , 0 ) ;
mutex_unlock ( & mib - > cnt_mutex ) ;
}
2022-06-22 12:04:23 +03:00
void ksz9477_r_phy ( struct ksz_device * dev , u16 addr , u16 reg , u16 * data )
2018-11-21 02:55:09 +03:00
{
u16 val = 0xffff ;
/* No real PHY after this. Simulate the PHY.
* A fixed PHY can be setup in the device tree , but this function is
* still called for that port during initialization .
* For RGMII PHY there is no way to access it so the fixed PHY should
* be used . For SGMII PHY the supporting code will be added later .
*/
if ( addr > = dev - > phy_port_cnt ) {
struct ksz_port * p = & dev - > ports [ addr ] ;
switch ( reg ) {
case MII_BMCR :
val = 0x1140 ;
break ;
case MII_BMSR :
val = 0x796d ;
break ;
case MII_PHYSID1 :
val = 0x0022 ;
break ;
case MII_PHYSID2 :
val = 0x1631 ;
break ;
case MII_ADVERTISE :
val = 0x05e1 ;
break ;
case MII_LPA :
val = 0xc5e1 ;
break ;
case MII_CTRL1000 :
val = 0x0700 ;
break ;
case MII_STAT1000 :
if ( p - > phydev . speed = = SPEED_1000 )
val = 0x3800 ;
else
val = 0 ;
break ;
}
} else {
ksz_pread16 ( dev , addr , 0x100 + ( reg < < 1 ) , & val ) ;
}
2022-06-17 11:42:48 +03:00
* data = val ;
2018-11-21 02:55:09 +03:00
}
2022-06-22 12:04:23 +03:00
void ksz9477_w_phy ( struct ksz_device * dev , u16 addr , u16 reg , u16 val )
2018-11-21 02:55:09 +03:00
{
/* No real PHY after this. */
if ( addr > = dev - > phy_port_cnt )
2022-06-17 11:42:48 +03:00
return ;
2019-03-01 06:57:24 +03:00
/* No gigabit support. Do not write to this register. */
if ( ! ( dev - > features & GBIT_SUPPORT ) & & reg = = MII_CTRL1000 )
2022-06-17 11:42:48 +03:00
return ;
2018-11-21 02:55:09 +03:00
2022-06-17 11:42:48 +03:00
ksz_pwrite16 ( dev , addr , 0x100 + ( reg < < 1 ) , val ) ;
2018-11-21 02:55:09 +03:00
}
2022-06-22 12:04:23 +03:00
void ksz9477_cfg_port_member ( struct ksz_device * dev , int port , u8 member )
2018-11-21 02:55:09 +03:00
{
ksz_pwrite32 ( dev , port , REG_PORT_VLAN_MEMBERSHIP__4 , member ) ;
}
2022-06-22 12:04:23 +03:00
void ksz9477_flush_dyn_mac_table ( struct ksz_device * dev , int port )
2018-11-21 02:55:09 +03:00
{
2022-06-28 20:13:28 +03:00
const u16 * regs = dev - > info - > regs ;
2018-11-21 02:55:09 +03:00
u8 data ;
2019-06-28 00:55:56 +03:00
regmap_update_bits ( dev - > regmap [ 0 ] , REG_SW_LUE_CTRL_2 ,
SW_FLUSH_OPTION_M < < SW_FLUSH_OPTION_S ,
SW_FLUSH_OPTION_DYN_MAC < < SW_FLUSH_OPTION_S ) ;
2022-05-17 12:43:26 +03:00
if ( port < dev - > info - > port_cnt ) {
2018-11-21 02:55:09 +03:00
/* flush individual port */
2022-06-28 20:13:28 +03:00
ksz_pread8 ( dev , port , regs [ P_STP_CTRL ] , & data ) ;
2018-11-21 02:55:09 +03:00
if ( ! ( data & PORT_LEARN_DISABLE ) )
2022-06-28 20:13:28 +03:00
ksz_pwrite8 ( dev , port , regs [ P_STP_CTRL ] ,
2018-11-21 02:55:09 +03:00
data | PORT_LEARN_DISABLE ) ;
ksz_cfg ( dev , S_FLUSH_TABLE_CTRL , SW_FLUSH_DYN_MAC_TABLE , true ) ;
2022-06-28 20:13:28 +03:00
ksz_pwrite8 ( dev , port , regs [ P_STP_CTRL ] , data ) ;
2018-11-21 02:55:09 +03:00
} else {
/* flush all */
ksz_cfg ( dev , S_FLUSH_TABLE_CTRL , SW_FLUSH_STP_TABLE , true ) ;
}
}
2022-06-22 12:04:23 +03:00
int ksz9477_port_vlan_filtering ( struct ksz_device * dev , int port ,
bool flag , struct netlink_ext_ack * extack )
2018-11-21 02:55:09 +03:00
{
if ( flag ) {
ksz_port_cfg ( dev , port , REG_PORT_LUE_CTRL ,
PORT_VLAN_LOOKUP_VID_0 , true ) ;
ksz_cfg ( dev , REG_SW_LUE_CTRL_0 , SW_VLAN_ENABLE , true ) ;
} else {
ksz_cfg ( dev , REG_SW_LUE_CTRL_0 , SW_VLAN_ENABLE , false ) ;
ksz_port_cfg ( dev , port , REG_PORT_LUE_CTRL ,
PORT_VLAN_LOOKUP_VID_0 , false ) ;
}
return 0 ;
}
2022-06-22 12:04:23 +03:00
int ksz9477_port_vlan_add ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_vlan * vlan ,
struct netlink_ext_ack * extack )
2018-11-21 02:55:09 +03:00
{
u32 vlan_table [ 3 ] ;
bool untagged = vlan - > flags & BRIDGE_VLAN_INFO_UNTAGGED ;
2021-01-09 03:01:53 +03:00
int err ;
2018-11-21 02:55:09 +03:00
2021-01-09 03:01:53 +03:00
err = ksz9477_get_vlan_table ( dev , vlan - > vid , vlan_table ) ;
if ( err ) {
2021-02-13 23:43:18 +03:00
NL_SET_ERR_MSG_MOD ( extack , " Failed to get vlan table " ) ;
2021-01-09 03:01:53 +03:00
return err ;
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
}
2018-11-21 02:55:09 +03:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
vlan_table [ 0 ] = VLAN_VALID | ( vlan - > vid & VLAN_FID_M ) ;
if ( untagged )
vlan_table [ 1 ] | = BIT ( port ) ;
else
vlan_table [ 1 ] & = ~ BIT ( port ) ;
vlan_table [ 1 ] & = ~ ( BIT ( dev - > cpu_port ) ) ;
2018-11-21 02:55:09 +03:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
vlan_table [ 2 ] | = BIT ( port ) | BIT ( dev - > cpu_port ) ;
2018-11-21 02:55:09 +03:00
2021-01-09 03:01:53 +03:00
err = ksz9477_set_vlan_table ( dev , vlan - > vid , vlan_table ) ;
if ( err ) {
2021-02-13 23:43:18 +03:00
NL_SET_ERR_MSG_MOD ( extack , " Failed to set vlan table " ) ;
2021-01-09 03:01:53 +03:00
return err ;
2018-11-21 02:55:09 +03:00
}
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
/* change PVID */
if ( vlan - > flags & BRIDGE_VLAN_INFO_PVID )
ksz_pwrite16 ( dev , port , REG_PORT_DEFAULT_VID , vlan - > vid ) ;
2021-01-09 03:01:53 +03:00
return 0 ;
2018-11-21 02:55:09 +03:00
}
2022-06-22 12:04:23 +03:00
int ksz9477_port_vlan_del ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_vlan * vlan )
2018-11-21 02:55:09 +03:00
{
bool untagged = vlan - > flags & BRIDGE_VLAN_INFO_UNTAGGED ;
u32 vlan_table [ 3 ] ;
u16 pvid ;
ksz_pread16 ( dev , port , REG_PORT_DEFAULT_VID , & pvid ) ;
pvid = pvid & 0xFFF ;
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
if ( ksz9477_get_vlan_table ( dev , vlan - > vid , vlan_table ) ) {
dev_dbg ( dev - > dev , " Failed to get vlan table \n " ) ;
return - ETIMEDOUT ;
}
2018-11-21 02:55:09 +03:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
vlan_table [ 2 ] & = ~ BIT ( port ) ;
2018-11-21 02:55:09 +03:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
if ( pvid = = vlan - > vid )
pvid = 1 ;
2018-11-21 02:55:09 +03:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
if ( untagged )
vlan_table [ 1 ] & = ~ BIT ( port ) ;
2018-11-21 02:55:09 +03:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 03:01:46 +03:00
if ( ksz9477_set_vlan_table ( dev , vlan - > vid , vlan_table ) ) {
dev_dbg ( dev - > dev , " Failed to set vlan table \n " ) ;
return - ETIMEDOUT ;
2018-11-21 02:55:09 +03:00
}
ksz_pwrite16 ( dev , port , REG_PORT_DEFAULT_VID , pvid ) ;
return 0 ;
}
2022-06-22 12:04:23 +03:00
/**
 * ksz9477_fdb_add - install a static MAC address entry for a port
 * @dev: switch device instance
 * @port: port index the address should be forwarded to
 * @addr: 6-byte MAC address
 * @vid: VLAN ID (0 means no FID association)
 * @db: DSA database the entry belongs to (unused by this hardware op)
 *
 * Looks up any existing ALU entry matching @addr/@vid, ORs this port into
 * its forwarding map, marks it static/valid, and writes it back.  The ALU
 * register window is serialized with dev->alu_mutex.
 *
 * Return: 0 on success, or a negative error from the ALU ready wait.
 */
int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}
2022-06-22 12:04:23 +03:00
/**
 * ksz9477_fdb_del - remove a port from a static MAC address entry
 * @dev: switch device instance
 * @port: port index to remove from the entry's forwarding map
 * @addr: 6-byte MAC address
 * @vid: VLAN ID used as the FID index for the lookup
 * @db: DSA database the entry belongs to (unused by this hardware op)
 *
 * Reads the ALU entry matching @addr/@vid and clears this port's
 * forwarding bit.  If no forwarding port remains — or the entry was not a
 * valid static entry to begin with — the whole entry is zeroed.  The ALU
 * register window is serialized with dev->alu_mutex.
 *
 * Return: 0 on success, or a negative error from the ALU ready wait.
 */
int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[2] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		/* entry is not a valid static one; write back all zeroes */
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}
/**
 * ksz9477_convert_alu - unpack a raw 4-word ALU entry into struct alu_struct
 * @alu: destination software representation
 * @alu_table: 4-element array of raw ALU register words (A..D)
 *
 * Word 0 carries validity/filter flags, priority/age count and MSTP index;
 * word 1 carries override/FID-use flags and the port forwarding map;
 * word 2 holds the FID and the top two MAC bytes; word 3 holds the
 * remaining four MAC bytes.
 */
static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}
2022-06-22 12:04:23 +03:00
/**
 * ksz9477_fdb_dump - walk the hardware ALU and report entries for a port
 * @dev: switch device instance
 * @port: port whose entries should be reported
 * @cb: DSA dump callback invoked for each matching entry
 * @data: opaque cookie passed through to @cb
 *
 * Starts an ALU search and, for every valid entry whose forwarding map
 * includes @port, calls @cb with the MAC, FID and static flag.  Each
 * search step is polled with a bounded busy-wait; the search is always
 * stopped on exit.  The ALU register window is serialized with
 * dev->alu_mutex.
 *
 * Return: 0 on success, -ETIMEDOUT if a search step times out, or the
 * first non-zero value returned by @cb.
 */
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			/* a valid entry is ready, or the search completed */
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:

	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}
2022-06-22 12:04:23 +03:00
int ksz9477_mdb_add ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_mdb * mdb , struct dsa_db db )
2018-11-21 02:55:09 +03:00
{
u32 static_table [ 4 ] ;
u32 data ;
int index ;
u32 mac_hi , mac_lo ;
2021-01-09 03:01:52 +03:00
int err = 0 ;
2018-11-21 02:55:09 +03:00
mac_hi = ( ( mdb - > addr [ 0 ] < < 8 ) | mdb - > addr [ 1 ] ) ;
mac_lo = ( ( mdb - > addr [ 2 ] < < 24 ) | ( mdb - > addr [ 3 ] < < 16 ) ) ;
mac_lo | = ( ( mdb - > addr [ 4 ] < < 8 ) | mdb - > addr [ 5 ] ) ;
mutex_lock ( & dev - > alu_mutex ) ;
2022-05-17 12:43:26 +03:00
for ( index = 0 ; index < dev - > info - > num_statics ; index + + ) {
2018-11-21 02:55:09 +03:00
/* find empty slot first */
data = ( index < < ALU_STAT_INDEX_S ) |
ALU_STAT_READ | ALU_STAT_START ;
ksz_write32 ( dev , REG_SW_ALU_STAT_CTRL__4 , data ) ;
/* wait to be finished */
2021-01-09 03:01:52 +03:00
err = ksz9477_wait_alu_sta_ready ( dev ) ;
if ( err ) {
2018-11-21 02:55:09 +03:00
dev_dbg ( dev - > dev , " Failed to read ALU STATIC \n " ) ;
goto exit ;
}
/* read ALU static table */
ksz9477_read_table ( dev , static_table ) ;
if ( static_table [ 0 ] & ALU_V_STATIC_VALID ) {
/* check this has same vid & mac address */
if ( ( ( static_table [ 2 ] > > ALU_V_FID_S ) = = mdb - > vid ) & &
( ( static_table [ 2 ] & ALU_V_MAC_ADDR_HI ) = = mac_hi ) & &
static_table [ 3 ] = = mac_lo ) {
/* found matching one */
break ;
}
} else {
/* found empty one */
break ;
}
}
/* no available entry */
2022-05-17 12:43:26 +03:00
if ( index = = dev - > info - > num_statics ) {
2021-01-09 03:01:52 +03:00
err = - ENOSPC ;
2018-11-21 02:55:09 +03:00
goto exit ;
2021-01-09 03:01:52 +03:00
}
2018-11-21 02:55:09 +03:00
/* add entry */
static_table [ 0 ] = ALU_V_STATIC_VALID ;
static_table [ 1 ] | = BIT ( port ) ;
if ( mdb - > vid )
static_table [ 1 ] | = ALU_V_USE_FID ;
static_table [ 2 ] = ( mdb - > vid < < ALU_V_FID_S ) ;
static_table [ 2 ] | = mac_hi ;
static_table [ 3 ] = mac_lo ;
ksz9477_write_table ( dev , static_table ) ;
data = ( index < < ALU_STAT_INDEX_S ) | ALU_STAT_START ;
ksz_write32 ( dev , REG_SW_ALU_STAT_CTRL__4 , data ) ;
/* wait to be finished */
2019-06-28 00:55:55 +03:00
if ( ksz9477_wait_alu_sta_ready ( dev ) )
2018-11-21 02:55:09 +03:00
dev_dbg ( dev - > dev , " Failed to read ALU STATIC \n " ) ;
exit :
mutex_unlock ( & dev - > alu_mutex ) ;
2021-01-09 03:01:52 +03:00
return err ;
2018-11-21 02:55:09 +03:00
}
2022-06-22 12:04:23 +03:00
int ksz9477_mdb_del ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_mdb * mdb , struct dsa_db db )
2018-11-21 02:55:09 +03:00
{
u32 static_table [ 4 ] ;
u32 data ;
int index ;
int ret = 0 ;
u32 mac_hi , mac_lo ;
mac_hi = ( ( mdb - > addr [ 0 ] < < 8 ) | mdb - > addr [ 1 ] ) ;
mac_lo = ( ( mdb - > addr [ 2 ] < < 24 ) | ( mdb - > addr [ 3 ] < < 16 ) ) ;
mac_lo | = ( ( mdb - > addr [ 4 ] < < 8 ) | mdb - > addr [ 5 ] ) ;
mutex_lock ( & dev - > alu_mutex ) ;
2022-05-17 12:43:26 +03:00
for ( index = 0 ; index < dev - > info - > num_statics ; index + + ) {
2018-11-21 02:55:09 +03:00
/* find empty slot first */
data = ( index < < ALU_STAT_INDEX_S ) |
ALU_STAT_READ | ALU_STAT_START ;
ksz_write32 ( dev , REG_SW_ALU_STAT_CTRL__4 , data ) ;
/* wait to be finished */
2019-06-28 00:55:55 +03:00
ret = ksz9477_wait_alu_sta_ready ( dev ) ;
if ( ret ) {
2018-11-21 02:55:09 +03:00
dev_dbg ( dev - > dev , " Failed to read ALU STATIC \n " ) ;
goto exit ;
}
/* read ALU static table */
ksz9477_read_table ( dev , static_table ) ;
if ( static_table [ 0 ] & ALU_V_STATIC_VALID ) {
/* check this has same vid & mac address */
if ( ( ( static_table [ 2 ] > > ALU_V_FID_S ) = = mdb - > vid ) & &
( ( static_table [ 2 ] & ALU_V_MAC_ADDR_HI ) = = mac_hi ) & &
static_table [ 3 ] = = mac_lo ) {
/* found matching one */
break ;
}
}
}
/* no available entry */
2022-05-17 12:43:26 +03:00
if ( index = = dev - > info - > num_statics )
2018-11-21 02:55:09 +03:00
goto exit ;
/* clear port */
static_table [ 1 ] & = ~ BIT ( port ) ;
if ( ( static_table [ 1 ] & ALU_V_PORT_MAP ) = = 0 ) {
/* delete entry */
static_table [ 0 ] = 0 ;
static_table [ 1 ] = 0 ;
static_table [ 2 ] = 0 ;
static_table [ 3 ] = 0 ;
}
ksz9477_write_table ( dev , static_table ) ;
data = ( index < < ALU_STAT_INDEX_S ) | ALU_STAT_START ;
ksz_write32 ( dev , REG_SW_ALU_STAT_CTRL__4 , data ) ;
/* wait to be finished */
2019-06-28 00:55:55 +03:00
ret = ksz9477_wait_alu_sta_ready ( dev ) ;
if ( ret )
2018-11-21 02:55:09 +03:00
dev_dbg ( dev - > dev , " Failed to read ALU STATIC \n " ) ;
exit :
mutex_unlock ( & dev - > alu_mutex ) ;
return ret ;
}
2022-06-22 12:04:23 +03:00
/* Enable RX or TX mirroring of @port towards the sniffer port named in
 * @mirror.  Only one sniffer port is supported at a time; -EBUSY is
 * returned (with an extack message) if a different one is already active.
 */
int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 ctrl;
	int i;

	/* Limit to one sniffer port
	 * Check if any of the port is already set for sniffing
	 * If yes, instruct the user to remove the previous entry & exit
	 */
	for (i = 0; i < dev->info->port_cnt; i++) {
		/* Skip the current sniffing port */
		if (i == mirror->to_local_port)
			continue;

		ksz_pread8(dev, i, P_MIRROR_CTRL, &ctrl);
		if (ctrl & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	/* Mirror the requested direction of the monitored port. */
	ksz_port_cfg(dev, port, P_MIRROR_CTRL,
		     ingress ? PORT_MIRROR_RX : PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}
2022-06-22 12:04:23 +03:00
void ksz9477_port_mirror_del ( struct ksz_device * dev , int port ,
struct dsa_mall_mirror_tc_entry * mirror )
2018-11-21 02:55:09 +03:00
{
2022-04-28 10:07:09 +03:00
bool in_use = false ;
2018-11-21 02:55:09 +03:00
u8 data ;
2022-04-28 10:07:09 +03:00
int p ;
2018-11-21 02:55:09 +03:00
if ( mirror - > ingress )
ksz_port_cfg ( dev , port , P_MIRROR_CTRL , PORT_MIRROR_RX , false ) ;
else
ksz_port_cfg ( dev , port , P_MIRROR_CTRL , PORT_MIRROR_TX , false ) ;
2022-04-28 10:07:09 +03:00
/* Check if any of the port is still referring to sniffer port */
2022-05-17 12:43:26 +03:00
for ( p = 0 ; p < dev - > info - > port_cnt ; p + + ) {
2022-04-28 10:07:09 +03:00
ksz_pread8 ( dev , p , P_MIRROR_CTRL , & data ) ;
if ( ( data & ( PORT_MIRROR_RX | PORT_MIRROR_TX ) ) ) {
in_use = true ;
break ;
}
}
/* delete sniffing if there are no other mirroring rules */
if ( ! in_use )
2018-11-21 02:55:09 +03:00
ksz_port_cfg ( dev , mirror - > to_local_port , P_MIRROR_CTRL ,
PORT_MIRROR_SNIFFER , false ) ;
}
2019-03-01 06:57:24 +03:00
/* Decode the gigabit bit of an XMII control value; the two chip
 * generations use opposite polarity for this bit.
 */
static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
{
	if (dev->features & NEW_XMII)
		return !(data & PORT_MII_NOT_1GBIT);

	return !!(data & PORT_MII_1000MBIT_S1);
}
/* Encode the requested gigabit state into an XMII control value; newer
 * chips use an inverted "not 1 Gbit" bit, older ones a direct enable.
 */
static void ksz9477_set_gbit(struct ksz_device *dev, bool gbit, u8 *data)
{
	u8 mask;
	bool set;

	if (dev->features & NEW_XMII) {
		mask = PORT_MII_NOT_1GBIT;
		set = !gbit;		/* inverted polarity */
	} else {
		mask = PORT_MII_1000MBIT_S1;
		set = gbit;
	}

	if (set)
		*data |= mask;
	else
		*data &= ~mask;
}
/* Decode the xMII mode field of an XMII control value into the driver's
 * numeric convention: 0 = MII, 1 = RMII, 2 = GMII, 3 = RGMII (default).
 * The register encoding differs between the two chip generations.
 */
static int ksz9477_get_xmii(struct ksz_device *dev, u8 data)
{
	bool new_xmii = dev->features & NEW_XMII;
	u8 sel = data & PORT_MII_SEL_M;

	if (sel == (new_xmii ? PORT_MII_SEL : PORT_MII_SEL_S1))
		return 0;
	if (sel == (new_xmii ? PORT_RMII_SEL : PORT_RMII_SEL_S1))
		return 1;
	if (sel == (new_xmii ? PORT_GMII_SEL : PORT_GMII_SEL_S1))
		return 2;

	return 3;
}
/* Encode a driver xMII mode (0 = MII, 1 = RMII, 2 = GMII, anything else =
 * RGMII) into the mode field of an XMII control value, using the selector
 * constants of the appropriate chip generation.
 */
static void ksz9477_set_xmii(struct ksz_device *dev, int mode, u8 *data)
{
	static const u8 new_sel[] = {
		PORT_MII_SEL, PORT_RMII_SEL, PORT_GMII_SEL, PORT_RGMII_SEL
	};
	static const u8 old_sel[] = {
		PORT_MII_SEL_S1, PORT_RMII_SEL_S1, PORT_GMII_SEL_S1,
		PORT_RGMII_SEL_S1
	};
	const u8 *sel = (dev->features & NEW_XMII) ? new_sel : old_sel;
	u8 xmii;

	/* Any mode outside 0..2 selects RGMII, matching the original
	 * switch default.
	 */
	xmii = (mode >= 0 && mode <= 2) ? sel[mode] : sel[3];

	*data &= ~PORT_MII_SEL_M;
	*data |= xmii;
}
static phy_interface_t ksz9477_get_interface ( struct ksz_device * dev , int port )
{
phy_interface_t interface ;
bool gbit ;
int mode ;
u8 data8 ;
if ( port < dev - > phy_port_cnt )
return PHY_INTERFACE_MODE_NA ;
ksz_pread8 ( dev , port , REG_PORT_XMII_CTRL_1 , & data8 ) ;
gbit = ksz9477_get_gbit ( dev , data8 ) ;
mode = ksz9477_get_xmii ( dev , data8 ) ;
switch ( mode ) {
case 2 :
interface = PHY_INTERFACE_MODE_GMII ;
if ( gbit )
break ;
2020-08-24 01:36:59 +03:00
fallthrough ;
2019-03-01 06:57:24 +03:00
case 0 :
interface = PHY_INTERFACE_MODE_MII ;
break ;
case 1 :
interface = PHY_INTERFACE_MODE_RMII ;
break ;
default :
interface = PHY_INTERFACE_MODE_RGMII ;
if ( data8 & PORT_RGMII_ID_EG_ENABLE )
interface = PHY_INTERFACE_MODE_RGMII_TXID ;
if ( data8 & PORT_RGMII_ID_IG_ENABLE ) {
interface = PHY_INTERFACE_MODE_RGMII_RXID ;
if ( data8 & PORT_RGMII_ID_EG_ENABLE )
interface = PHY_INTERFACE_MODE_RGMII_ID ;
}
break ;
2019-02-23 03:36:47 +03:00
}
2019-03-01 06:57:24 +03:00
return interface ;
2019-02-23 03:36:47 +03:00
}
2019-06-12 23:49:05 +03:00
/* Write an indirect PHY MMD register on @port: program the MMD device and
 * register address through the setup/index registers, then write @val.
 * The four writes form a fixed address-then-data sequence and must stay
 * in this order.
 */
static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
				   u8 dev_addr, u16 reg_addr, u16 val)
{
	/* address phase: select MMD device and target register */
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
	/* data phase: switch to data access without address auto-increment */
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
}
/* Apply the per-port PHY MMD workaround writes required by the Microchip
 * silicon errata sheets.  The register/value pairs are taken verbatim from
 * those documents and must not be altered; see the group comments below.
 */
static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
{
	/* Apply PHY settings to address errata listed in
	 * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
	 * Silicon Errata and Data Sheet Clarification documents:
	 *
	 * Register settings are needed to improve PHY receive performance
	 */
	ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
	ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);

	/* Transmit waveform amplitude can be improved
	 * (1000BASE-T, 100BASE-TX, 10BASE-Te)
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);

	/* Energy Efficient Ethernet (EEE) feature select must
	 * be manually disabled (except on KSZ8565 which is 100Mbit)
	 */
	if (dev->features & GBIT_SUPPORT)
		ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);

	/* Register settings are required to meet data sheet
	 * supply current specifications
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}
2022-06-22 12:04:23 +03:00
void ksz9477_get_caps ( struct ksz_device * dev , int port ,
struct phylink_config * config )
2022-05-17 12:43:32 +03:00
{
2022-06-22 12:04:21 +03:00
config - > mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
MAC_SYM_PAUSE ;
if ( dev - > features & GBIT_SUPPORT )
config - > mac_capabilities | = MAC_1000FD ;
2022-05-17 12:43:32 +03:00
}
2022-06-22 12:04:23 +03:00
/* Bring up one switch port: tail tagging (CPU port only), storm control,
 * priority handling, flow control, the xMII interface mode for MAC-only
 * ports, and the initial port-membership map.  The register writes are
 * order-dependent hardware configuration.
 */
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct ksz_port *p = &dev->ports[port];
	struct dsa_switch *ds = dev->ds;
	u8 data8, member;
	u16 data16;

	/* enable tag tail for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* disable DiffServ priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* enable 802.1p priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);

	if (port < dev->phy_port_cnt) {
		/* do not force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     false);

		/* apply silicon errata workarounds on affected chips */
		if (dev->info->phy_errata_9477)
			ksz9477_phy_errata_setup(dev, port);
	} else {
		/* force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     true);

		/* configure MAC to 1G & RGMII mode */
		ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
		switch (p->interface) {
		case PHY_INTERFACE_MODE_MII:
			ksz9477_set_xmii(dev, 0, &data8);
			ksz9477_set_gbit(dev, false, &data8);
			p->phydev.speed = SPEED_100;
			break;
		case PHY_INTERFACE_MODE_RMII:
			ksz9477_set_xmii(dev, 1, &data8);
			ksz9477_set_gbit(dev, false, &data8);
			p->phydev.speed = SPEED_100;
			break;
		case PHY_INTERFACE_MODE_GMII:
			ksz9477_set_xmii(dev, 2, &data8);
			ksz9477_set_gbit(dev, true, &data8);
			p->phydev.speed = SPEED_1000;
			break;
		default:
			/* RGMII variants: set TX/RX delay bits per mode */
			ksz9477_set_xmii(dev, 3, &data8);
			ksz9477_set_gbit(dev, true, &data8);
			data8 &= ~PORT_RGMII_ID_IG_ENABLE;
			data8 &= ~PORT_RGMII_ID_EG_ENABLE;
			if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
				data8 |= PORT_RGMII_ID_IG_ENABLE;
			if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
				data8 |= PORT_RGMII_ID_EG_ENABLE;

			/* On KSZ9893, disable RGMII in-band status support */
			if (dev->features & IS_9893)
				data8 &= ~PORT_MII_MAC_MODE;
			p->phydev.speed = SPEED_1000;
			break;
		}
		ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
		p->phydev.duplex = 1;
	}

	/* CPU port is a member of all user ports; user ports start out
	 * only knowing the upstream port.
	 */
	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (port < dev->phy_port_cnt)
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
2022-06-22 12:04:23 +03:00
/* Locate and initialize the CPU (host) port, resolving its phy-mode from
 * the port node, the legacy switch-level property, or the XMII registers,
 * then mark every remaining port present but blocked until it is enabled.
 */
void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from XMII register to determine host port
			 * interface.  If set specifically in device tree
			 * note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				/* No per-port phy-mode: fall back to the
				 * legacy switch-level property, else to
				 * what the hardware registers report.
				 */
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			/* Mention the register-derived mode if it differs
			 * from what will actually be used.
			 */
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
			p->on = 1;
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		p = &dev->ports[i];

		/* keep user ports blocked until they are brought up */
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
		p->on = 1;
		if (i < dev->phy_port_cnt)
			p->phy = 1;
		/* Port 6 of this chip id is SGMII-attached; treat it as
		 * MAC-only for now.
		 */
		if (dev->chip_id == 0x00947700 && i == 6) {
			p->sgmii = 1;

			/* SGMII PHY detection code is not implemented yet. */
			p->phy = 0;
		}
	}
}
2022-06-22 12:04:23 +03:00
int ksz9477_enable_stp_addr ( struct ksz_device * dev )
2022-06-22 12:04:15 +03:00
{
u32 data ;
int ret ;
/* Enable Reserved multicast table */
ksz_cfg ( dev , REG_SW_LUE_CTRL_0 , SW_RESV_MCAST_ENABLE , true ) ;
/* Set the Override bit for forwarding BPDU packet to CPU */
ret = ksz_write32 ( dev , REG_SW_ALU_VAL_B ,
ALU_V_OVERRIDE | BIT ( dev - > cpu_port ) ) ;
if ( ret < 0 )
return ret ;
data = ALU_STAT_START | ALU_RESV_MCAST_ADDR ;
ret = ksz_write32 ( dev , REG_SW_ALU_STAT_CTRL__4 , data ) ;
if ( ret < 0 )
return ret ;
/* wait to be finished */
ret = ksz9477_wait_alu_sta_ready ( dev ) ;
if ( ret < 0 ) {
dev_err ( dev - > dev , " Failed to update Reserved Multicast table \n " ) ;
return ret ;
}
return 0 ;
}
2022-06-22 12:04:23 +03:00
/* Switch-wide one-time setup invoked by the DSA framework: queue
 * management, length checking, default MTU and MIB/rate-limit modes.
 * Returns 0 or the regmap error from the MTU write.
 */
int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Do not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Now we can configure default MTU value */
	ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	return 0;
}
2022-06-22 12:04:23 +03:00
/* Translate a (port, register offset) pair into the absolute address of
 * the per-port control register, for use by the common ksz accessors.
 */
u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}
2022-06-22 12:04:23 +03:00
int ksz9477_switch_init ( struct ksz_device * dev )
2022-06-17 11:42:45 +03:00
{
u8 data8 ;
int ret ;
dev - > port_mask = ( 1 < < dev - > info - > port_cnt ) - 1 ;
2018-11-21 02:55:09 +03:00
/* turn off SPI DO Edge select */
ret = ksz_read8 ( dev , REG_SW_GLOBAL_SERIAL_CTRL_0 , & data8 ) ;
if ( ret )
return ret ;
data8 & = ~ SPI_AUTO_EDGE_DETECTION ;
ret = ksz_write8 ( dev , REG_SW_GLOBAL_SERIAL_CTRL_0 , data8 ) ;
if ( ret )
return ret ;
2019-03-01 06:57:24 +03:00
ret = ksz_read8 ( dev , REG_GLOBAL_OPTIONS , & data8 ) ;
2018-11-21 02:55:09 +03:00
if ( ret )
return ret ;
/* Number of ports can be reduced depending on chip. */
dev - > phy_port_cnt = 5 ;
2019-03-01 06:57:24 +03:00
/* Default capability is gigabit capable. */
dev - > features = GBIT_SUPPORT ;
2022-06-17 11:42:45 +03:00
if ( dev - > chip_id = = KSZ9893_CHIP_ID ) {
2019-03-01 06:57:24 +03:00
dev - > features | = IS_9893 ;
/* Chip does not support gigabit. */
if ( data8 & SW_QW_ABLE )
dev - > features & = ~ GBIT_SUPPORT ;
dev - > phy_port_cnt = 2 ;
} else {
/* Chip uses new XMII register definitions. */
dev - > features | = NEW_XMII ;
/* Chip does not support gigabit. */
if ( ! ( data8 & SW_GIGABIT_ABLE ) )
dev - > features & = ~ GBIT_SUPPORT ;
}
2022-06-22 12:04:20 +03:00
2018-11-21 02:55:09 +03:00
return 0 ;
}
2022-06-22 12:04:23 +03:00
/* Driver teardown hook: issue a switch reset via ksz9477_reset_switch(). */
void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}
MODULE_AUTHOR ( " Woojung Huh <Woojung.Huh@microchip.com> " ) ;
MODULE_DESCRIPTION ( " Microchip KSZ9477 Series Switch DSA Driver " ) ;
MODULE_LICENSE ( " GPL " ) ;