2019-07-29 19:49:47 +02:00
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip KSZ8795 switch driver
*
* Copyright ( C ) 2017 Microchip Technology Inc .
* Tristram Ha < Tristram . Ha @ microchip . com >
*/
2021-06-14 06:31:23 +02:00
# include <linux/bitfield.h>
2019-07-29 19:49:47 +02:00
# include <linux/delay.h>
# include <linux/export.h>
# include <linux/gpio.h>
2021-12-28 16:49:13 -08:00
# include <linux/if_vlan.h>
2019-07-29 19:49:47 +02:00
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/platform_data/microchip-ksz.h>
# include <linux/phy.h>
# include <linux/etherdevice.h>
# include <linux/if_bridge.h>
2021-06-14 06:31:18 +02:00
# include <linux/micrel_phy.h>
2019-07-29 19:49:47 +02:00
# include <net/dsa.h>
# include <net/switchdev.h>
2021-06-14 06:31:19 +02:00
# include <linux/phylink.h>
2019-07-29 19:49:47 +02:00
# include "ksz_common.h"
# include "ksz8795_reg.h"
2021-04-27 09:09:03 +02:00
# include "ksz8.h"
2019-07-29 19:49:47 +02:00
/* Set or clear @bits in the global switch register @addr. */
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	u8 value = set ? bits : 0;

	regmap_update_bits(dev->regmap[0], addr, bits, value);
}
/* Set or clear @bits in the per-port register @offset of @port. */
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	u8 value = set ? bits : 0;

	regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
			   bits, value);
}
2022-03-16 13:55:29 +01:00
/* Write one byte to an indirectly accessed table entry.
 *
 * The data byte is staged in the indirect byte register first; writing
 * the control register with the table selector and address then commits
 * it to the hardware table.  Returns 0 or a negative error code from the
 * underlying register access.
 */
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
	const u16 *regs = dev->info->regs;
	u16 ctrl_addr = IND_ACC_TABLE(table) | addr;
	int ret;

	mutex_lock(&dev->alu_mutex);

	ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
	if (!ret)
		ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}
2022-06-22 14:34:23 +05:30
int ksz8_reset_switch ( struct ksz_device * dev )
2019-07-29 19:49:47 +02:00
{
2021-04-27 09:09:04 +02:00
if ( ksz_is_ksz88x3 ( dev ) ) {
/* reset switch */
ksz_cfg ( dev , KSZ8863_REG_SW_RESET ,
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET , true ) ;
ksz_cfg ( dev , KSZ8863_REG_SW_RESET ,
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET , false ) ;
} else {
/* reset switch */
ksz_write8 ( dev , REG_POWER_MANAGEMENT_1 ,
SW_SOFTWARE_POWER_DOWN < < SW_POWER_MANAGEMENT_MODE_S ) ;
ksz_write8 ( dev , REG_POWER_MANAGEMENT_1 , 0 ) ;
}
2019-07-29 19:49:47 +02:00
return 0 ;
}
net: dsa: microchip: ksz8: add MTU configuration support
Make MTU configurable on KSZ87xx and KSZ88xx series of switches.
Before this patch, pre-configured behavior was different on different
switch series, due to opposite meaning of the same bit:
- KSZ87xx: Reg 4, Bit 1 - if 1, max frame size is 1532; if 0 - 1514
- KSZ88xx: Reg 4, Bit 1 - if 1, max frame size is 1514; if 0 - 1532
Since the code was doing "... SW_LEGAL_PACKET_DISABLE, true)", I
assume the intent was to set the max frame size to 1532.
With this patch, by setting MTU size 1500, both switch series will be
configured to the 1532 frame limit.
This patch was tested on KSZ8873.
Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
Acked-by: Arun Ramadoss <arun.ramadoss@microchip.com>
Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
2022-12-05 06:22:30 +01:00
/* Program the KSZ88x3 frame-size limit for the given wire frame size.
 *
 * Frames up to the legal limit enable the "legal packet" filter; frames
 * above the normal limit enable huge-packet support.  In between, both
 * bits stay clear (normal packet size).
 */
static int ksz8863_change_mtu(struct ksz_device *dev, int frame_size)
{
	u8 ctrl2;

	if (frame_size <= KSZ8_LEGAL_PACKET_SIZE)
		ctrl2 = KSZ8863_LEGAL_PACKET_ENABLE;
	else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
		ctrl2 = KSZ8863_HUGE_PACKET_ENABLE;
	else
		ctrl2 = 0;

	return ksz_rmw8(dev, REG_SW_CTRL_2, KSZ8863_LEGAL_PACKET_ENABLE |
			KSZ8863_HUGE_PACKET_ENABLE, ctrl2);
}
static int ksz8795_change_mtu ( struct ksz_device * dev , int frame_size )
{
u8 ctrl1 = 0 , ctrl2 = 0 ;
int ret ;
if ( frame_size > KSZ8_LEGAL_PACKET_SIZE )
ctrl2 | = SW_LEGAL_PACKET_DISABLE ;
else if ( frame_size > KSZ8863_NORMAL_PACKET_SIZE )
ctrl1 | = SW_HUGE_PACKET ;
ret = ksz_rmw8 ( dev , REG_SW_CTRL_1 , SW_HUGE_PACKET , ctrl1 ) ;
if ( ret )
return ret ;
return ksz_rmw8 ( dev , REG_SW_CTRL_2 , SW_LEGAL_PACKET_DISABLE , ctrl2 ) ;
}
int ksz8_change_mtu ( struct ksz_device * dev , int port , int mtu )
{
u16 frame_size ;
if ( ! dsa_is_cpu_port ( dev - > ds , port ) )
return 0 ;
frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN ;
switch ( dev - > chip_id ) {
case KSZ8795_CHIP_ID :
case KSZ8794_CHIP_ID :
case KSZ8765_CHIP_ID :
return ksz8795_change_mtu ( dev , frame_size ) ;
case KSZ8830_CHIP_ID :
return ksz8863_change_mtu ( dev , frame_size ) ;
}
return - EOPNOTSUPP ;
}
2019-07-29 19:49:47 +02:00
/* Configure the number of egress priority queues on @port. */
static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
{
	u8 ctrl0, drop_tag;

	/* Number of queues can only be 1, 2, or 4; 3 is rounded up to 4. */
	switch (queue) {
	case 4:
	case 3:
		queue = PORT_QUEUE_SPLIT_4;
		break;
	case 2:
		queue = PORT_QUEUE_SPLIT_2;
		break;
	default:
		queue = PORT_QUEUE_SPLIT_1;
	}

	ksz_pread8(dev, port, REG_PORT_CTRL_0, &ctrl0);
	ksz_pread8(dev, port, P_DROP_TAG_CTRL, &drop_tag);

	/* The queue-split selector is spread over two registers. */
	ctrl0 &= ~PORT_QUEUE_SPLIT_L;
	if (queue & PORT_QUEUE_SPLIT_2)
		ctrl0 |= PORT_QUEUE_SPLIT_L;
	drop_tag &= ~PORT_QUEUE_SPLIT_H;
	if (queue & PORT_QUEUE_SPLIT_4)
		drop_tag |= PORT_QUEUE_SPLIT_H;

	ksz_pwrite8(dev, port, REG_PORT_CTRL_0, ctrl0);
	ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, drop_tag);

	/* Default is port based for egress rate limit. */
	if (queue != PORT_QUEUE_SPLIT_1)
		ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
			true);
}
2022-06-22 14:34:23 +05:30
/* Read one MIB counter of @port at counter index @addr and accumulate it
 * (with overflow adjustment) into *@cnt.
 */
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	const u32 *masks = dev->info->masks;
	const u16 *regs = dev->info->regs;
	u16 ctrl_addr;
	u32 data;
	u8 check;
	int retry;

	ctrl_addr = addr + dev->info->reg_mib_cnt * port;
	ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);

	mutex_lock(&dev->alu_mutex);
	ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);

	/* It is almost guaranteed to always read the valid bit because of
	 * slow SPI speed.
	 */
	for (retry = 2; retry > 0; retry--) {
		ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);

		if (check & masks[MIB_COUNTER_VALID]) {
			ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
			if (check & masks[MIB_COUNTER_OVERFLOW])
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}
	mutex_unlock(&dev->alu_mutex);
}
2021-04-27 09:09:04 +02:00
/* Read a KSZ87xx "total bytes" or "packets dropped" MIB value of @port
 * and accumulate it into *@cnt.  @dropped is unused on this family.
 */
static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
			      u64 *dropped, u64 *cnt)
{
	const u32 *masks = dev->info->masks;
	const u16 *regs = dev->info->regs;
	u16 ctrl_addr;
	u32 data;
	u8 check;
	int retry;

	/* Rebase @addr into the per-port "total" counter range. */
	addr -= dev->info->reg_mib_cnt;
	ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
	ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
	ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);

	mutex_lock(&dev->alu_mutex);
	ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);

	/* It is almost guaranteed to always read the valid bit because of
	 * slow SPI speed.
	 */
	for (retry = 2; retry > 0; retry--) {
		ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);

		if (!(check & masks[MIB_COUNTER_VALID]))
			continue;

		ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
		if (addr < 2) {
			u64 total;

			/* Byte totals carry extra high-order bits in the
			 * check register.
			 */
			total = check & MIB_TOTAL_BYTES_H;
			total <<= 32;
			*cnt += total;
			*cnt += data;
			if (check & masks[MIB_COUNTER_OVERFLOW]) {
				total = MIB_TOTAL_BYTES_H + 1;
				total <<= 32;
				*cnt += total;
			}
		} else {
			if (check & masks[MIB_COUNTER_OVERFLOW])
				*cnt += MIB_PACKET_DROPPED + 1;
			*cnt += data & MIB_PACKET_DROPPED;
		}
		break;
	}
	mutex_unlock(&dev->alu_mutex);
}
2021-04-27 09:09:04 +02:00
/* Read a KSZ88x3 dropped-packet MIB value of @port and accumulate the
 * delta since the last read into *@cnt.  The previous raw readings are
 * cached in the storage behind @dropped (two u32 slots: RX then TX).
 */
static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
			      u64 *dropped, u64 *cnt)
{
	u32 *last = (u32 *)dropped;
	const u16 *regs = dev->info->regs;
	u16 ctrl_addr;
	u32 data;
	u32 prev;

	addr -= dev->info->reg_mib_cnt;
	ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
			   KSZ8863_MIB_PACKET_DROPPED_RX_0;
	ctrl_addr += port;
	ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);

	mutex_lock(&dev->alu_mutex);
	ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
	ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
	mutex_unlock(&dev->alu_mutex);

	data &= MIB_PACKET_DROPPED;
	prev = last[addr];
	if (data != prev) {
		last[addr] = data;
		/* The hardware counter is narrow; handle one wrap-around. */
		if (data < prev)
			data += MIB_PACKET_DROPPED + 1;
		data -= prev;
		*cnt += data;
	}
}
2022-06-22 14:34:23 +05:30
/* Dispatch the dropped-packet MIB read to the family-specific reader. */
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		    u64 *dropped, u64 *cnt)
{
	if (ksz_is_ksz88x3(dev))
		ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
	else
		ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
}
2022-06-22 14:34:23 +05:30
/* Freeze or unfreeze the MIB counters of @port (KSZ87xx only; the
 * KSZ88x3 family has no freeze support, so this is a no-op there).
 */
void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	if (ksz_is_ksz88x3(dev))
		return;

	/* enable the port for flush/freeze function */
	if (freeze)
		ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
	ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze);

	/* disable the port after freeze is done */
	if (!freeze)
		ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
2022-06-22 14:34:23 +05:30
void ksz8_port_init_cnt ( struct ksz_device * dev , int port )
2019-07-29 19:49:47 +02:00
{
struct ksz_port_mib * mib = & dev - > ports [ port ] . mib ;
2021-04-27 09:09:04 +02:00
u64 * dropped ;
2019-07-29 19:49:47 +02:00
2021-04-27 09:09:04 +02:00
if ( ! ksz_is_ksz88x3 ( dev ) ) {
/* flush all enabled port MIB counters */
ksz_cfg ( dev , REG_SW_CTRL_6 , BIT ( port ) , true ) ;
ksz_cfg ( dev , REG_SW_CTRL_6 , SW_MIB_COUNTER_FLUSH , true ) ;
ksz_cfg ( dev , REG_SW_CTRL_6 , BIT ( port ) , false ) ;
}
2019-07-29 19:49:47 +02:00
mib - > cnt_ptr = 0 ;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
2022-05-17 15:13:28 +05:30
while ( mib - > cnt_ptr < dev - > info - > reg_mib_cnt ) {
2019-07-29 19:49:47 +02:00
dev - > dev_ops - > r_mib_cnt ( dev , port , mib - > cnt_ptr ,
& mib - > counters [ mib - > cnt_ptr ] ) ;
+ + mib - > cnt_ptr ;
}
2021-04-27 09:09:04 +02:00
/* last one in storage */
2022-05-17 15:13:28 +05:30
dropped = & mib - > counters [ dev - > info - > mib_cnt ] ;
2021-04-27 09:09:04 +02:00
2019-07-29 19:49:47 +02:00
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
2022-05-17 15:13:28 +05:30
while ( mib - > cnt_ptr < dev - > info - > mib_cnt ) {
2019-07-29 19:49:47 +02:00
dev - > dev_ops - > r_mib_pkt ( dev , port , mib - > cnt_ptr ,
2021-04-27 09:09:04 +02:00
dropped , & mib - > counters [ mib - > cnt_ptr ] ) ;
2019-07-29 19:49:47 +02:00
+ + mib - > cnt_ptr ;
}
}
2021-04-27 09:09:01 +02:00
/* Read one 64-bit entry at @addr from the indirect table @table. */
static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr,
			 u64 *data)
{
	const u16 *regs = dev->info->regs;
	u16 ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;

	mutex_lock(&dev->alu_mutex);
	ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
	ksz_read64(dev, regs[REG_IND_DATA_HI], data);
	mutex_unlock(&dev->alu_mutex);
}
2021-04-27 09:09:01 +02:00
/* Write one 64-bit entry at @addr into the indirect table @table.
 * The data registers are staged first; writing the control register
 * commits the entry.
 */
static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
	const u16 *regs = dev->info->regs;
	u16 ctrl_addr = IND_ACC_TABLE(table) | addr;

	mutex_lock(&dev->alu_mutex);
	ksz_write64(dev, regs[REG_IND_DATA_HI], data);
	ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
	mutex_unlock(&dev->alu_mutex);
}
2021-04-27 09:09:01 +02:00
/* Wait for a dynamic MAC table entry to become readable.
 *
 * Returns 0 with the entry's status byte in *@data, -EAGAIN if the
 * hardware never reported ready, or -ENXIO if the table is empty.
 */
static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
{
	const u32 *masks = dev->info->masks;
	const u16 *regs = dev->info->regs;
	int timeout = 100;

	/* Poll until the not-ready bit clears or attempts run out. */
	do {
		ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
		timeout--;
	} while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);

	/* Entry is not ready for accessing. */
	if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY])
		return -EAGAIN;

	/* Entry is ready for accessing. */
	ksz_read8(dev, regs[REG_IND_DATA_8], data);

	/* There is no valid entry in the table. */
	if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
		return -ENXIO;

	return 0;
}
2022-06-22 14:34:23 +05:30
/* Read the dynamic MAC table entry at @addr.
 *
 * On success fills @mac_addr, @fid, @src_port, @timestamp and the total
 * number of valid entries in @entries, and returns 0.  Returns -EAGAIN
 * when the hardware was not ready and -ENXIO when the table is empty.
 */
int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
			 u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
{
	const u8 *shifts = dev->info->shifts;
	const u32 *masks = dev->info->masks;
	const u16 *regs = dev->info->regs;
	u32 data_hi, data_lo;
	u16 ctrl_addr;
	u8 data;
	int rc;

	ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;

	mutex_lock(&dev->alu_mutex);
	ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);

	rc = ksz8_valid_dyn_entry(dev, &data);
	if (rc == -EAGAIN) {
		if (addr == 0)
			*entries = 0;
	} else if (rc == -ENXIO) {
		*entries = 0;
	} else {
		/* At least one valid entry in the table. */
		u64 buf = 0;
		int cnt;

		ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
		data_hi = (u32)(buf >> 32);
		data_lo = (u32)buf;

		/* The valid-entry count straddles the status byte and
		 * the high data word.
		 */
		cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
		cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
		cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
			shifts[DYNAMIC_MAC_ENTRIES];
		*entries = cnt + 1;

		*fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
			shifts[DYNAMIC_MAC_FID];
		*src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
			shifts[DYNAMIC_MAC_SRC_PORT];
		*timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
			shifts[DYNAMIC_MAC_TIMESTAMP];

		/* The MAC address is packed big-endian across both words. */
		mac_addr[5] = (u8)data_lo;
		mac_addr[4] = (u8)(data_lo >> 8);
		mac_addr[3] = (u8)(data_lo >> 16);
		mac_addr[2] = (u8)(data_lo >> 24);
		mac_addr[1] = (u8)data_hi;
		mac_addr[0] = (u8)(data_hi >> 8);
		rc = 0;
	}
	mutex_unlock(&dev->alu_mutex);

	return rc;
}
2022-06-22 14:34:23 +05:30
/* Read the static MAC table entry at @addr into @alu.
 * Returns 0 when the slot holds an entry, -ENXIO when it is unused.
 */
int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
			 struct alu_struct *alu)
{
	const u8 *shifts = dev->info->shifts;
	const u32 *masks = dev->info->masks;
	u32 data_hi, data_lo;
	u64 data;

	ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
	data_hi = data >> 32;
	data_lo = (u32)data;

	/* A slot is in use when either the valid or override bit is set. */
	if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
			 masks[STATIC_MAC_TABLE_OVERRIDE])))
		return -ENXIO;

	alu->mac[5] = (u8)data_lo;
	alu->mac[4] = (u8)(data_lo >> 8);
	alu->mac[3] = (u8)(data_lo >> 16);
	alu->mac[2] = (u8)(data_lo >> 24);
	alu->mac[1] = (u8)data_hi;
	alu->mac[0] = (u8)(data_hi >> 8);
	alu->port_forward = (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
				shifts[STATIC_MAC_FWD_PORTS];
	alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;

	/* NOTE(review): the read layout appears shifted by one bit relative
	 * to the write layout for the USE_FID/FID fields — confirm against
	 * the datasheet.
	 */
	data_hi >>= 1;

	alu->is_static = true;
	alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
	alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
			shifts[STATIC_MAC_FID];

	return 0;
}
2022-06-22 14:34:23 +05:30
/* Write @alu into the static MAC table slot at @addr. */
void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
			  struct alu_struct *alu)
{
	const u8 *shifts = dev->info->shifts;
	const u32 *masks = dev->info->masks;
	u32 data_hi, data_lo;
	u64 data;

	/* Pack the MAC address big-endian into the two 32-bit words. */
	data_lo = ((u32)alu->mac[2] << 24) |
		  ((u32)alu->mac[3] << 16) |
		  ((u32)alu->mac[4] << 8) | alu->mac[5];
	data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
	data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS];

	if (alu->is_override)
		data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE];
	if (alu->is_use_fid) {
		data_hi |= masks[STATIC_MAC_TABLE_USE_FID];
		data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID];
	}
	if (alu->is_static)
		data_hi |= masks[STATIC_MAC_TABLE_VALID];
	else
		data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];

	data = (u64)data_hi << 32 | data_lo;
	ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
}
2021-04-27 09:09:03 +02:00
/* Unpack fid, membership and valid flag from a raw VLAN table entry. */
static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
			   u8 *member, u8 *valid)
{
	const u8 *shifts = dev->info->shifts;
	const u32 *masks = dev->info->masks;

	*fid = vlan & masks[VLAN_TABLE_FID];
	*member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
			shifts[VLAN_TABLE_MEMBERSHIP_S];
	*valid = !!(vlan & masks[VLAN_TABLE_VALID]);
}
2021-04-27 09:09:03 +02:00
/* Pack fid, membership and valid flag into a raw VLAN table entry. */
static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
			 u16 *vlan)
{
	const u8 *shifts = dev->info->shifts;
	const u32 *masks = dev->info->masks;

	*vlan = fid;
	*vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
	if (valid)
		*vlan |= masks[VLAN_TABLE_VALID];
}
2021-04-27 09:09:01 +02:00
/* Refresh the VLAN cache from table row @addr.
 * Each 64-bit table row packs four consecutive VLAN entries.
 */
static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
{
	const u8 *shifts = dev->info->shifts;
	u64 data;
	int i;

	ksz8_r_table(dev, TABLE_VLAN, addr, &data);

	addr *= 4;
	for (i = 0; i < 4; i++) {
		dev->vlan_cache[addr + i].table[0] = (u16)data;
		data >>= shifts[VLAN_TABLE];
	}
}
2021-04-27 09:09:01 +02:00
/* Read the raw VLAN entry for @vid into *@vlan. */
static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
{
	u64 buf;
	u16 *slots = (u16 *)&buf;
	u16 addr = vid / 4;	/* four VLAN entries per table row */
	int index = vid & 3;

	ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
	*vlan = slots[index];
}
2021-04-27 09:09:01 +02:00
/* Write the raw VLAN entry @vlan for @vid and update the cache. */
static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
{
	u64 buf;
	u16 *slots = (u16 *)&buf;
	u16 addr = vid / 4;	/* four VLAN entries per table row */
	int index = vid & 3;

	/* Read-modify-write the row containing this VLAN entry. */
	ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
	slots[index] = vlan;
	dev->vlan_cache[vid].table[0] = vlan;
	ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
2022-08-26 12:56:21 +02:00
/* Emulate a Clause-22 PHY register read for internal PHY @phy.
 *
 * Translates the requested MII register from the switch's per-port
 * control/status registers.  Unknown registers leave *@val untouched.
 * Returns 0 or a negative error from the underlying register access.
 */
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 restart, speed, ctrl, link;
	const u16 *regs = dev->info->regs;
	u8 md_ctrl, md_result;
	bool handled = true;
	u16 data = 0;
	u8 p = phy;
	int ret;

	switch (reg) {
	case MII_BMCR:
		ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
		if (ret)
			return ret;

		ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
		if (ret)
			return ret;

		ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
		if (ret)
			return ret;

		if (restart & PORT_PHY_LOOPBACK)
			data |= BMCR_LOOPBACK;
		if (ctrl & PORT_FORCE_100_MBIT)
			data |= BMCR_SPEED100;

		/* The autoneg-enable bit has inverted polarity between
		 * the KSZ88x3 and KSZ87xx families.
		 */
		if (ksz_is_ksz88x3(dev)) {
			if ((ctrl & PORT_AUTO_NEG_ENABLE))
				data |= BMCR_ANENABLE;
		} else {
			if (!(ctrl & PORT_AUTO_NEG_DISABLE))
				data |= BMCR_ANENABLE;
		}

		if (restart & PORT_POWER_DOWN)
			data |= BMCR_PDOWN;
		if (restart & PORT_AUTO_NEG_RESTART)
			data |= BMCR_ANRESTART;
		if (ctrl & PORT_FORCE_FULL_DUPLEX)
			data |= BMCR_FULLDPLX;
		if (speed & PORT_HP_MDIX)
			data |= KSZ886X_BMCR_HP_MDIX;
		if (restart & PORT_FORCE_MDIX)
			data |= KSZ886X_BMCR_FORCE_MDI;
		if (restart & PORT_AUTO_MDIX_DISABLE)
			data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
		if (restart & PORT_TX_DISABLE)
			data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
		if (restart & PORT_LED_OFF)
			data |= KSZ886X_BMCR_DISABLE_LED;
		break;
	case MII_BMSR:
		ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
		if (ret)
			return ret;

		data = BMSR_100FULL |
		       BMSR_100HALF |
		       BMSR_10FULL |
		       BMSR_10HALF |
		       BMSR_ANEGCAPABLE;
		if (link & PORT_AUTO_NEG_COMPLETE)
			data |= BMSR_ANEGCOMPLETE;
		if (link & PORT_STAT_LINK_GOOD)
			data |= BMSR_LSTATUS;
		break;
	case MII_PHYSID1:
		data = KSZ8795_ID_HI;
		break;
	case MII_PHYSID2:
		if (ksz_is_ksz88x3(dev))
			data = KSZ8863_ID_LO;
		else
			data = KSZ8795_ID_LO;
		break;
	case MII_ADVERTISE:
		ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
		if (ret)
			return ret;

		data = ADVERTISE_CSMA;
		if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
			data |= ADVERTISE_PAUSE_CAP;
		if (ctrl & PORT_AUTO_NEG_100BTX_FD)
			data |= ADVERTISE_100FULL;
		if (ctrl & PORT_AUTO_NEG_100BTX)
			data |= ADVERTISE_100HALF;
		if (ctrl & PORT_AUTO_NEG_10BT_FD)
			data |= ADVERTISE_10FULL;
		if (ctrl & PORT_AUTO_NEG_10BT)
			data |= ADVERTISE_10HALF;
		break;
	case MII_LPA:
		ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
		if (ret)
			return ret;

		data = LPA_SLCT;
		if (link & PORT_REMOTE_SYM_PAUSE)
			data |= LPA_PAUSE_CAP;
		if (link & PORT_REMOTE_100BTX_FD)
			data |= LPA_100FULL;
		if (link & PORT_REMOTE_100BTX)
			data |= LPA_100HALF;
		if (link & PORT_REMOTE_10BT_FD)
			data |= LPA_10FULL;
		if (link & PORT_REMOTE_10BT)
			data |= LPA_10HALF;

		/* Any reported ability implies link-partner acknowledge. */
		if (data & ~LPA_SLCT)
			data |= LPA_LPACK;
		break;
	case PHY_REG_LINK_MD:
		ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &md_ctrl);
		if (ret)
			return ret;

		ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &md_result);
		if (ret)
			return ret;

		if (md_ctrl & PORT_START_CABLE_DIAG)
			data |= PHY_START_CABLE_DIAG;
		if (md_ctrl & PORT_CABLE_10M_SHORT)
			data |= PHY_CABLE_10M_SHORT;
		data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M,
				FIELD_GET(PORT_CABLE_DIAG_RESULT_M, md_ctrl));
		data |= FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M,
				(FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, md_ctrl) << 8) |
				FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, md_result));
		break;
	case PHY_REG_PHY_CTRL:
		ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
		if (ret)
			return ret;

		if (link & PORT_MDIX_STATUS)
			data |= KSZ886X_CTRL_MDIX_STAT;
		break;
	default:
		handled = false;
		break;
	}

	if (handled)
		*val = data;

	return 0;
}
2022-08-26 12:56:21 +02:00
/* Emulate a Clause-22 PHY register write for internal PHY @phy.
 *
 * Translates the MII register value into the switch's per-port
 * control registers.  Each register is only written when its value
 * actually changes.  Unknown registers are silently ignored.
 * Returns 0 or a negative error from the underlying register access.
 */
int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
	u8 restart, speed, ctrl, data;
	const u16 *regs = dev->info->regs;
	u8 p = phy;
	int ret;

	switch (reg) {
	case MII_BMCR:
		/* Do not support PHY reset function. */
		if (val & BMCR_RESET)
			break;

		ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
		if (ret)
			return ret;

		data = speed;
		if (val & KSZ886X_BMCR_HP_MDIX)
			data |= PORT_HP_MDIX;
		else
			data &= ~PORT_HP_MDIX;

		if (data != speed) {
			ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
			if (ret)
				return ret;
		}

		ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
		if (ret)
			return ret;

		data = ctrl;

		/* The autoneg-enable bit has inverted polarity between
		 * the KSZ88x3 and KSZ87xx families.
		 */
		if (ksz_is_ksz88x3(dev)) {
			if ((val & BMCR_ANENABLE))
				data |= PORT_AUTO_NEG_ENABLE;
			else
				data &= ~PORT_AUTO_NEG_ENABLE;
		} else {
			if (!(val & BMCR_ANENABLE))
				data |= PORT_AUTO_NEG_DISABLE;
			else
				data &= ~PORT_AUTO_NEG_DISABLE;

			/* Fiber port does not support auto-negotiation. */
			if (dev->ports[p].fiber)
				data |= PORT_AUTO_NEG_DISABLE;
		}

		if (val & BMCR_SPEED100)
			data |= PORT_FORCE_100_MBIT;
		else
			data &= ~PORT_FORCE_100_MBIT;
		if (val & BMCR_FULLDPLX)
			data |= PORT_FORCE_FULL_DUPLEX;
		else
			data &= ~PORT_FORCE_FULL_DUPLEX;

		if (data != ctrl) {
			ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
			if (ret)
				return ret;
		}

		ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
		if (ret)
			return ret;

		data = restart;
		if (val & KSZ886X_BMCR_DISABLE_LED)
			data |= PORT_LED_OFF;
		else
			data &= ~PORT_LED_OFF;
		if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
			data |= PORT_TX_DISABLE;
		else
			data &= ~PORT_TX_DISABLE;
		if (val & BMCR_ANRESTART)
			data |= PORT_AUTO_NEG_RESTART;
		else
			data &= ~PORT_AUTO_NEG_RESTART;
		if (val & BMCR_PDOWN)
			data |= PORT_POWER_DOWN;
		else
			data &= ~PORT_POWER_DOWN;
		if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
			data |= PORT_AUTO_MDIX_DISABLE;
		else
			data &= ~PORT_AUTO_MDIX_DISABLE;
		if (val & KSZ886X_BMCR_FORCE_MDI)
			data |= PORT_FORCE_MDIX;
		else
			data &= ~PORT_FORCE_MDIX;
		if (val & BMCR_LOOPBACK)
			data |= PORT_PHY_LOOPBACK;
		else
			data &= ~PORT_PHY_LOOPBACK;

		if (data != restart) {
			ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
					  data);
			if (ret)
				return ret;
		}
		break;
	case MII_ADVERTISE:
		ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
		if (ret)
			return ret;

		data = ctrl;
		data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
			  PORT_AUTO_NEG_100BTX_FD |
			  PORT_AUTO_NEG_100BTX |
			  PORT_AUTO_NEG_10BT_FD |
			  PORT_AUTO_NEG_10BT);
		if (val & ADVERTISE_PAUSE_CAP)
			data |= PORT_AUTO_NEG_SYM_PAUSE;
		if (val & ADVERTISE_100FULL)
			data |= PORT_AUTO_NEG_100BTX_FD;
		if (val & ADVERTISE_100HALF)
			data |= PORT_AUTO_NEG_100BTX;
		if (val & ADVERTISE_10FULL)
			data |= PORT_AUTO_NEG_10BT_FD;
		if (val & ADVERTISE_10HALF)
			data |= PORT_AUTO_NEG_10BT;

		if (data != ctrl) {
			ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
			if (ret)
				return ret;
		}
		break;
	case PHY_REG_LINK_MD:
		if (val & PHY_START_CABLE_DIAG)
			ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL,
				     PORT_START_CABLE_DIAG, true);
		break;
	default:
		break;
	}

	return 0;
}
2022-06-22 14:34:23 +05:30
/**
 * ksz8_cfg_port_member - program the VLAN membership mask of a port
 * @dev: switch device
 * @port: port index to configure
 * @member: bitmask of ports this port is allowed to forward to
 *
 * Read-modify-write of the per-port mirror control register: only the
 * VLAN membership field is replaced, all other bits are preserved.  The
 * mask is clipped to the ports that actually exist on this device.
 */
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	u8 ctrl;

	ksz_pread8(dev, port, P_MIRROR_CTRL, &ctrl);
	ctrl &= ~PORT_VLAN_MEMBERSHIP;
	ctrl |= member & dev->port_mask;
	ksz_pwrite8(dev, port, P_MIRROR_CTRL, ctrl);
}
2022-06-22 14:34:23 +05:30
void ksz8_flush_dyn_mac_table ( struct ksz_device * dev , int port )
2019-07-29 19:49:47 +02:00
{
2020-12-01 21:45:05 +01:00
u8 learn [ DSA_MAX_PORTS ] ;
2019-07-29 19:49:47 +02:00
int first , index , cnt ;
struct ksz_port * p ;
2022-06-28 22:43:28 +05:30
const u16 * regs ;
regs = dev - > info - > regs ;
2019-07-29 19:49:47 +02:00
2022-05-17 15:13:26 +05:30
if ( ( uint ) port < dev - > info - > port_cnt ) {
2019-07-29 19:49:47 +02:00
first = port ;
cnt = port + 1 ;
} else {
/* Flush all ports. */
first = 0 ;
2022-05-17 15:13:26 +05:30
cnt = dev - > info - > port_cnt ;
2019-07-29 19:49:47 +02:00
}
for ( index = first ; index < cnt ; index + + ) {
p = & dev - > ports [ index ] ;
if ( ! p - > on )
continue ;
2022-06-28 22:43:28 +05:30
ksz_pread8 ( dev , index , regs [ P_STP_CTRL ] , & learn [ index ] ) ;
2019-07-29 19:49:47 +02:00
if ( ! ( learn [ index ] & PORT_LEARN_DISABLE ) )
2022-06-28 22:43:28 +05:30
ksz_pwrite8 ( dev , index , regs [ P_STP_CTRL ] ,
2019-07-29 19:49:47 +02:00
learn [ index ] | PORT_LEARN_DISABLE ) ;
}
ksz_cfg ( dev , S_FLUSH_TABLE_CTRL , SW_FLUSH_DYN_MAC_TABLE , true ) ;
for ( index = first ; index < cnt ; index + + ) {
p = & dev - > ports [ index ] ;
if ( ! p - > on )
continue ;
if ( ! ( learn [ index ] & PORT_LEARN_DISABLE ) )
2022-06-28 22:43:28 +05:30
ksz_pwrite8 ( dev , index , regs [ P_STP_CTRL ] , learn [ index ] ) ;
2019-07-29 19:49:47 +02:00
}
}
2022-06-22 14:34:23 +05:30
/**
 * ksz8_fdb_dump - iterate the dynamic MAC table and report entries
 * @dev: switch device
 * @port: port whose entries should be reported
 * @cb: DSA dump callback invoked for each matching entry
 * @data: opaque cookie passed through to @cb
 *
 * Walks the hardware dynamic MAC table and invokes @cb for every entry
 * whose member mask includes @port.  Only dynamic entries are reported
 * (is_static is always false here).
 *
 * Return: 0 on success or the first non-zero value returned by @cb.
 */
int ksz8_fdb_dump(struct ksz_device *dev, int port,
		  dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
	struct alu_struct alu;

	do {
		alu.is_static = false;
		ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
					   &timestamp, &entries);
		if (!ret && (member & BIT(port))) {
			/* Fix: propagate the FID actually read from the
			 * table; previously alu.fid was left uninitialized
			 * and garbage was reported to the callback.
			 */
			alu.fid = fid;
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				break;
		}
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;

	return ret;
}
2022-06-22 14:34:23 +05:30
int ksz8_mdb_add ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_mdb * mdb , struct dsa_db db )
2022-06-17 14:12:53 +05:30
{
struct alu_struct alu ;
int index ;
int empty = 0 ;
alu . port_forward = 0 ;
for ( index = 0 ; index < dev - > info - > num_statics ; index + + ) {
2022-06-17 14:12:54 +05:30
if ( ! ksz8_r_sta_mac_table ( dev , index , & alu ) ) {
2022-06-17 14:12:53 +05:30
/* Found one already in static MAC table. */
if ( ! memcmp ( alu . mac , mdb - > addr , ETH_ALEN ) & &
alu . fid = = mdb - > vid )
break ;
/* Remember the first empty entry. */
} else if ( ! empty ) {
empty = index + 1 ;
}
}
/* no available entry */
if ( index = = dev - > info - > num_statics & & ! empty )
return - ENOSPC ;
/* add entry */
if ( index = = dev - > info - > num_statics ) {
index = empty - 1 ;
memset ( & alu , 0 , sizeof ( alu ) ) ;
memcpy ( alu . mac , mdb - > addr , ETH_ALEN ) ;
alu . is_static = true ;
}
alu . port_forward | = BIT ( port ) ;
if ( mdb - > vid ) {
alu . is_use_fid = true ;
/* Need a way to map VID to FID. */
alu . fid = mdb - > vid ;
}
2022-06-17 14:12:54 +05:30
ksz8_w_sta_mac_table ( dev , index , & alu ) ;
2022-06-17 14:12:53 +05:30
return 0 ;
}
2022-06-22 14:34:23 +05:30
int ksz8_mdb_del ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_mdb * mdb , struct dsa_db db )
2022-06-17 14:12:53 +05:30
{
struct alu_struct alu ;
int index ;
for ( index = 0 ; index < dev - > info - > num_statics ; index + + ) {
2022-06-17 14:12:54 +05:30
if ( ! ksz8_r_sta_mac_table ( dev , index , & alu ) ) {
2022-06-17 14:12:53 +05:30
/* Found one already in static MAC table. */
if ( ! memcmp ( alu . mac , mdb - > addr , ETH_ALEN ) & &
alu . fid = = mdb - > vid )
break ;
}
}
/* no available entry */
if ( index = = dev - > info - > num_statics )
goto exit ;
/* clear port */
alu . port_forward & = ~ BIT ( port ) ;
if ( ! alu . port_forward )
alu . is_static = false ;
2022-06-17 14:12:54 +05:30
ksz8_w_sta_mac_table ( dev , index , & alu ) ;
2022-06-17 14:12:53 +05:30
exit :
return 0 ;
}
2022-06-22 14:34:23 +05:30
/**
 * ksz8_port_vlan_filtering - enable or disable VLAN filtering
 * @dev: switch device
 * @port: port the request arrived on (filtering is global on this chip)
 * @flag: true to enable filtering, false to disable
 * @extack: netlink extended ack (unused)
 *
 * Return: 0 on success, -ENOTSUPP on KSZ88x3 which lacks this feature.
 */
int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
			     struct netlink_ext_ack *extack)
{
	int i;

	if (ksz_is_ksz88x3(dev))
		return -ENOTSUPP;

	/* Discard packets with VID not enabled on the switch */
	ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);

	/* Discard packets with VID not enabled on the ingress port.
	 * The setting is applied to every PHY port, not just @port,
	 * because filtering is a switch-wide property here.
	 */
	for (i = 0; i < dev->phy_port_cnt; i++)
		ksz_port_cfg(dev, i, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
			     flag);

	return 0;
}
2021-08-10 00:59:28 +02:00
/* Enable or disable PVID insertion for untagged ingress on one port.
 * KSZ88x3 packs two control bits per port into a single global register
 * (the shift places port 0 at bits 5:4, port 1 at 3:2, port 2 at 1:0);
 * other chips use a per-port control register where 0x0f enables and
 * 0x00 disables the feature.
 */
static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
{
	if (ksz_is_ksz88x3(dev)) {
		u8 mask = 0x03 << (4 - 2 * port);

		ksz_cfg(dev, REG_SW_INSERT_SRC_PVID, mask, state);
	} else {
		ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
	}
}
2022-06-22 14:34:23 +05:30
int ksz8_port_vlan_add ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_vlan * vlan ,
struct netlink_ext_ack * extack )
2019-07-29 19:49:47 +02:00
{
bool untagged = vlan - > flags & BRIDGE_VLAN_INFO_UNTAGGED ;
2021-08-10 00:59:37 +02:00
struct ksz_port * p = & dev - > ports [ port ] ;
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
u16 data , new_pvid = 0 ;
2019-07-29 19:49:47 +02:00
u8 fid , member , valid ;
2021-04-27 09:09:04 +02:00
if ( ksz_is_ksz88x3 ( dev ) )
return - ENOTSUPP ;
2021-08-10 00:59:37 +02:00
/* If a VLAN is added with untagged flag different from the
* port ' s Remove Tag flag , we need to change the latter .
* Ignore VID 0 , which is always untagged .
2021-08-10 00:59:57 +02:00
* Ignore CPU port , which will always be tagged .
2021-08-10 00:59:37 +02:00
*/
2021-08-10 00:59:57 +02:00
if ( untagged ! = p - > remove_tag & & vlan - > vid ! = 0 & &
port ! = dev - > cpu_port ) {
2021-08-10 00:59:37 +02:00
unsigned int vid ;
/* Reject attempts to add a VLAN that requires the
* Remove Tag flag to be changed , unless there are no
* other VLANs currently configured .
*/
2022-05-17 15:13:26 +05:30
for ( vid = 1 ; vid < dev - > info - > num_vlans ; + + vid ) {
2021-08-10 00:59:37 +02:00
/* Skip the VID we are going to add or reconfigure */
if ( vid = = vlan - > vid )
continue ;
ksz8_from_vlan ( dev , dev - > vlan_cache [ vid ] . table [ 0 ] ,
& fid , & member , & valid ) ;
if ( valid & & ( member & BIT ( port ) ) )
return - EINVAL ;
}
ksz_port_cfg ( dev , port , P_TAG_CTRL , PORT_REMOVE_TAG , untagged ) ;
p - > remove_tag = untagged ;
}
2019-07-29 19:49:47 +02:00
2021-04-27 09:09:01 +02:00
ksz8_r_vlan_table ( dev , vlan - > vid , & data ) ;
2021-04-27 09:09:03 +02:00
ksz8_from_vlan ( dev , data , & fid , & member , & valid ) ;
2019-07-29 19:49:47 +02:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
/* First time to setup the VLAN entry. */
if ( ! valid ) {
/* Need to find a way to map VID to FID. */
fid = 1 ;
valid = 1 ;
}
member | = BIT ( port ) ;
2019-07-29 19:49:47 +02:00
2021-04-27 09:09:03 +02:00
ksz8_to_vlan ( dev , fid , member , valid , & data ) ;
2021-04-27 09:09:01 +02:00
ksz8_w_vlan_table ( dev , vlan - > vid , data ) ;
2019-07-29 19:49:47 +02:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
/* change PVID */
if ( vlan - > flags & BRIDGE_VLAN_INFO_PVID )
new_pvid = vlan - > vid ;
2019-07-29 19:49:47 +02:00
if ( new_pvid ) {
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
u16 vid ;
2019-07-29 19:49:47 +02:00
ksz_pread16 ( dev , port , REG_PORT_CTRL_VID , & vid ) ;
2021-08-10 00:59:28 +02:00
vid & = ~ VLAN_VID_MASK ;
2019-07-29 19:49:47 +02:00
vid | = new_pvid ;
ksz_pwrite16 ( dev , port , REG_PORT_CTRL_VID , vid ) ;
2021-08-10 00:59:28 +02:00
ksz8_port_enable_pvid ( dev , port , true ) ;
2019-07-29 19:49:47 +02:00
}
2021-01-09 02:01:53 +02:00
return 0 ;
2019-07-29 19:49:47 +02:00
}
2022-06-22 14:34:23 +05:30
int ksz8_port_vlan_del ( struct ksz_device * dev , int port ,
const struct switchdev_obj_port_vlan * vlan )
2019-07-29 19:49:47 +02:00
{
2021-08-10 00:59:28 +02:00
u16 data , pvid ;
2019-07-29 19:49:47 +02:00
u8 fid , member , valid ;
2021-04-27 09:09:04 +02:00
if ( ksz_is_ksz88x3 ( dev ) )
return - ENOTSUPP ;
2019-07-29 19:49:47 +02:00
ksz_pread16 ( dev , port , REG_PORT_CTRL_VID , & pvid ) ;
pvid = pvid & 0xFFF ;
2021-04-27 09:09:01 +02:00
ksz8_r_vlan_table ( dev , vlan - > vid , & data ) ;
2021-04-27 09:09:03 +02:00
ksz8_from_vlan ( dev , data , & fid , & member , & valid ) ;
2019-07-29 19:49:47 +02:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
member & = ~ BIT ( port ) ;
2019-07-29 19:49:47 +02:00
net: switchdev: remove vid_begin -> vid_end range from VLAN objects
The call path of a switchdev VLAN addition to the bridge looks something
like this today:
nbp_vlan_init
| __br_vlan_set_default_pvid
| | |
| | br_afspec |
| | | |
| | v |
| | br_process_vlan_info |
| | | |
| | v |
| | br_vlan_info |
| | / \ /
| | / \ /
| | / \ /
| | / \ /
v v v v v
nbp_vlan_add br_vlan_add ------+
| ^ ^ | |
| / | | |
| / / / |
\ br_vlan_get_master/ / v
\ ^ / / br_vlan_add_existing
\ | / / |
\ | / / /
\ | / / /
\ | / / /
\ | / / /
v | | v /
__vlan_add /
/ | /
/ | /
v | /
__vlan_vid_add | /
\ | /
v v v
br_switchdev_port_vlan_add
The ranges UAPI was introduced to the bridge in commit bdced7ef7838
("bridge: support for multiple vlans and vlan ranges in setlink and
dellink requests") (Jan 10 2015). But the VLAN ranges (parsed in br_afspec)
have always been passed one by one, through struct bridge_vlan_info
tmp_vinfo, to br_vlan_info. So the range never went too far in depth.
Then Scott Feldman introduced the switchdev_port_bridge_setlink function
in commit 47f8328bb1a4 ("switchdev: add new switchdev bridge setlink").
That marked the introduction of the SWITCHDEV_OBJ_PORT_VLAN, which made
full use of the range. But switchdev_port_bridge_setlink was called like
this:
br_setlink
-> br_afspec
-> switchdev_port_bridge_setlink
Basically, the switchdev and the bridge code were not tightly integrated.
Then commit 41c498b9359e ("bridge: restore br_setlink back to original")
came, and switchdev drivers were required to implement
.ndo_bridge_setlink = switchdev_port_bridge_setlink for a while.
In the meantime, commits such as 0944d6b5a2fa ("bridge: try switchdev op
first in __vlan_vid_add/del") finally made switchdev penetrate the
br_vlan_info() barrier and start to develop the call path we have today.
But remember, br_vlan_info() still receives VLANs one by one.
Then Arkadi Sharshevsky refactored the switchdev API in 2017 in commit
29ab586c3d83 ("net: switchdev: Remove bridge bypass support from
switchdev") so that drivers would not implement .ndo_bridge_setlink any
longer. The switchdev_port_bridge_setlink also got deleted.
This refactoring removed the parallel bridge_setlink implementation from
switchdev, and left the only switchdev VLAN objects to be the ones
offloaded from __vlan_vid_add (basically RX filtering) and __vlan_add
(the latter coming from commit 9c86ce2c1ae3 ("net: bridge: Notify about
bridge VLANs")).
That is to say, today the switchdev VLAN object ranges are not used in
the kernel. Refactoring the above call path is a bit complicated, when
the bridge VLAN call path is already a bit complicated.
Let's go off and finish the job of commit 29ab586c3d83 by deleting the
bogus iteration through the VLAN ranges from the drivers. Some aspects
of this feature never made too much sense in the first place. For
example, what is a range of VLANs all having the BRIDGE_VLAN_INFO_PVID
flag supposed to mean, when a port can obviously have a single pvid?
This particular configuration _is_ denied as of commit 6623c60dc28e
("bridge: vlan: enforce no pvid flag in vlan ranges"), but from an API
perspective, the driver still has to play pretend, and only offload the
vlan->vid_end as pvid. And the addition of a switchdev VLAN object can
modify the flags of another, completely unrelated, switchdev VLAN
object! (a VLAN that is PVID will invalidate the PVID flag from whatever
other VLAN had previously been offloaded with switchdev and had that
flag. Yet switchdev never notifies about that change, drivers are
supposed to guess).
Nonetheless, having a VLAN range in the API makes error handling look
scarier than it really is - unwinding on errors and all of that.
When in reality, no one really calls this API with more than one VLAN.
It is all unnecessary complexity.
And despite appearing pretentious (two-phase transactional model and
all), the switchdev API is really sloppy because the VLAN addition and
removal operations are not paired with one another (you can add a VLAN
100 times and delete it just once). The bridge notifies through
switchdev of a VLAN addition not only when the flags of an existing VLAN
change, but also when nothing changes. There are switchdev drivers out
there who don't like adding a VLAN that has already been added, and
those checks don't really belong at driver level. But the fact that the
API contains ranges is yet another factor that prevents this from being
addressed in the future.
Of the existing switchdev pieces of hardware, it appears that only
Mellanox Spectrum supports offloading more than one VLAN at a time,
through mlxsw_sp_port_vlan_set. I have kept that code internal to the
driver, because there is some more bookkeeping that makes use of it, but
I deleted it from the switchdev API. But since the switchdev support for
ranges has already been de facto deleted by a Mellanox employee and
nobody noticed for 4 years, I'm going to assume it's not a biggie.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com> # switchdev and mlxsw
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> # hellcreek
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-01-09 02:01:46 +02:00
/* Invalidate the entry if no more member. */
if ( ! member ) {
fid = 0 ;
valid = 0 ;
}
2019-07-29 19:49:47 +02:00
2021-04-27 09:09:03 +02:00
ksz8_to_vlan ( dev , fid , member , valid , & data ) ;
2021-04-27 09:09:01 +02:00
ksz8_w_vlan_table ( dev , vlan - > vid , data ) ;
2019-07-29 19:49:47 +02:00
2021-08-10 00:59:28 +02:00
if ( pvid = = vlan - > vid )
ksz8_port_enable_pvid ( dev , port , false ) ;
2019-07-29 19:49:47 +02:00
return 0 ;
}
2022-06-22 14:34:23 +05:30
int ksz8_port_mirror_add ( struct ksz_device * dev , int port ,
struct dsa_mall_mirror_tc_entry * mirror ,
bool ingress , struct netlink_ext_ack * extack )
2019-07-29 19:49:47 +02:00
{
if ( ingress ) {
ksz_port_cfg ( dev , port , P_MIRROR_CTRL , PORT_MIRROR_RX , true ) ;
dev - > mirror_rx | = BIT ( port ) ;
} else {
ksz_port_cfg ( dev , port , P_MIRROR_CTRL , PORT_MIRROR_TX , true ) ;
dev - > mirror_tx | = BIT ( port ) ;
}
ksz_port_cfg ( dev , port , P_MIRROR_CTRL , PORT_MIRROR_SNIFFER , false ) ;
/* configure mirror port */
if ( dev - > mirror_rx | | dev - > mirror_tx )
ksz_port_cfg ( dev , mirror - > to_local_port , P_MIRROR_CTRL ,
PORT_MIRROR_SNIFFER , true ) ;
return 0 ;
}
2022-06-22 14:34:23 +05:30
void ksz8_port_mirror_del ( struct ksz_device * dev , int port ,
struct dsa_mall_mirror_tc_entry * mirror )
2019-07-29 19:49:47 +02:00
{
u8 data ;
if ( mirror - > ingress ) {
ksz_port_cfg ( dev , port , P_MIRROR_CTRL , PORT_MIRROR_RX , false ) ;
dev - > mirror_rx & = ~ BIT ( port ) ;
} else {
ksz_port_cfg ( dev , port , P_MIRROR_CTRL , PORT_MIRROR_TX , false ) ;
dev - > mirror_tx & = ~ BIT ( port ) ;
}
ksz_pread8 ( dev , port , P_MIRROR_CTRL , & data ) ;
if ( ! dev - > mirror_rx & & ! dev - > mirror_tx )
ksz_port_cfg ( dev , mirror - > to_local_port , P_MIRROR_CTRL ,
PORT_MIRROR_SNIFFER , false ) ;
}
2021-04-27 09:09:02 +02:00
static void ksz8795_cpu_interface_select ( struct ksz_device * dev , int port )
{
struct ksz_port * p = & dev - > ports [ port ] ;
if ( ! p - > interface & & dev - > compat_interface ) {
dev_warn ( dev - > dev ,
" Using legacy switch \" phy-mode \" property, because it is missing on port %d node. "
" Please update your device tree. \n " ,
port ) ;
p - > interface = dev - > compat_interface ;
}
}
2022-06-22 14:34:23 +05:30
void ksz8_port_setup ( struct ksz_device * dev , int port , bool cpu_port )
2019-07-29 19:49:47 +02:00
{
2021-11-26 13:39:26 +01:00
struct dsa_switch * ds = dev - > ds ;
2021-04-27 09:09:03 +02:00
const u32 * masks ;
2021-04-27 09:09:02 +02:00
u8 member ;
2019-07-29 19:49:47 +02:00
2022-06-28 22:43:24 +05:30
masks = dev - > info - > masks ;
2021-04-27 09:09:03 +02:00
2019-07-29 19:49:47 +02:00
/* enable broadcast storm limit */
ksz_port_cfg ( dev , port , P_BCAST_STORM_CTRL , PORT_BROADCAST_STORM , true ) ;
2021-04-27 09:09:04 +02:00
if ( ! ksz_is_ksz88x3 ( dev ) )
ksz8795_set_prio_queue ( dev , port , 4 ) ;
2019-07-29 19:49:47 +02:00
/* disable DiffServ priority */
ksz_port_cfg ( dev , port , P_PRIO_CTRL , PORT_DIFFSERV_ENABLE , false ) ;
/* replace priority */
2021-04-27 09:09:03 +02:00
ksz_port_cfg ( dev , port , P_802_1P_CTRL ,
masks [ PORT_802_1P_REMAPPING ] , false ) ;
2019-07-29 19:49:47 +02:00
/* enable 802.1p priority */
ksz_port_cfg ( dev , port , P_PRIO_CTRL , PORT_802_1P_ENABLE , true ) ;
if ( cpu_port ) {
2021-04-27 09:09:04 +02:00
if ( ! ksz_is_ksz88x3 ( dev ) )
ksz8795_cpu_interface_select ( dev , port ) ;
2019-07-29 19:49:47 +02:00
2021-11-26 13:39:26 +01:00
member = dsa_user_ports ( ds ) ;
2019-07-29 19:49:47 +02:00
} else {
2021-11-26 13:39:26 +01:00
member = BIT ( dsa_upstream_port ( ds , port ) ) ;
2019-07-29 19:49:47 +02:00
}
2021-11-26 13:39:26 +01:00
2021-04-27 09:09:01 +02:00
ksz8_cfg_port_member ( dev , port , member ) ;
2019-07-29 19:49:47 +02:00
}
2022-06-22 14:34:23 +05:30
void ksz8_config_cpu_port ( struct dsa_switch * ds )
2019-07-29 19:49:47 +02:00
{
struct ksz_device * dev = ds - > priv ;
struct ksz_port * p ;
2021-04-27 09:09:03 +02:00
const u32 * masks ;
2022-06-28 22:43:27 +05:30
const u16 * regs ;
2019-07-29 19:49:47 +02:00
u8 remote ;
int i ;
2022-06-28 22:43:24 +05:30
masks = dev - > info - > masks ;
2022-06-28 22:43:23 +05:30
regs = dev - > info - > regs ;
2021-04-27 09:09:03 +02:00
ksz_cfg ( dev , regs [ S_TAIL_TAG_CTRL ] , masks [ SW_TAIL_TAG_ENABLE ] , true ) ;
2019-07-29 19:49:47 +02:00
p = & dev - > ports [ dev - > cpu_port ] ;
p - > on = 1 ;
2021-04-27 09:09:01 +02:00
ksz8_port_setup ( dev , dev - > cpu_port , true ) ;
2019-07-29 19:49:47 +02:00
2020-12-01 21:45:01 +01:00
for ( i = 0 ; i < dev - > phy_port_cnt ; i + + ) {
2019-07-29 19:49:47 +02:00
p = & dev - > ports [ i ] ;
2022-06-17 14:12:51 +05:30
ksz_port_stp_state_set ( ds , i , BR_STATE_DISABLED ) ;
2019-07-29 19:49:47 +02:00
/* Last port may be disabled. */
2020-12-01 21:45:01 +01:00
if ( i = = dev - > phy_port_cnt )
2019-07-29 19:49:47 +02:00
break ;
p - > on = 1 ;
}
for ( i = 0 ; i < dev - > phy_port_cnt ; i + + ) {
p = & dev - > ports [ i ] ;
if ( ! p - > on )
continue ;
2021-04-27 09:09:04 +02:00
if ( ! ksz_is_ksz88x3 ( dev ) ) {
ksz_pread8 ( dev , i , regs [ P_REMOTE_STATUS ] , & remote ) ;
2022-06-17 14:12:46 +05:30
if ( remote & KSZ8_PORT_FIBER_MODE )
2021-04-27 09:09:04 +02:00
p - > fiber = 1 ;
}
2019-07-29 19:49:47 +02:00
if ( p - > fiber )
2022-06-28 22:43:28 +05:30
ksz_port_cfg ( dev , i , regs [ P_STP_CTRL ] ,
PORT_FORCE_FLOW_CTRL , true ) ;
2019-07-29 19:49:47 +02:00
else
2022-06-28 22:43:28 +05:30
ksz_port_cfg ( dev , i , regs [ P_STP_CTRL ] ,
PORT_FORCE_FLOW_CTRL , false ) ;
2019-07-29 19:49:47 +02:00
}
}
2022-03-16 13:55:29 +01:00
static int ksz8_handle_global_errata ( struct dsa_switch * ds )
{
struct ksz_device * dev = ds - > priv ;
int ret = 0 ;
/* KSZ87xx Errata DS80000687C.
* Module 2 : Link drops with some EEE link partners .
* An issue with the EEE next page exchange between the
* KSZ879x / KSZ877x / KSZ876x and some EEE link partners may result in
* the link dropping .
*/
2022-05-17 15:13:26 +05:30
if ( dev - > info - > ksz87xx_eee_link_erratum )
2022-03-16 13:55:29 +01:00
ret = ksz8_ind_write8 ( dev , TABLE_EEE , REG_IND_EEE_GLOB2_HI , 0 ) ;
return ret ;
}
2022-06-22 14:34:23 +05:30
int ksz8_enable_stp_addr ( struct ksz_device * dev )
2022-06-22 14:34:15 +05:30
{
struct alu_struct alu ;
/* Setup STP address for STP operation. */
memset ( & alu , 0 , sizeof ( alu ) ) ;
ether_addr_copy ( alu . mac , eth_stp_addr ) ;
alu . is_static = true ;
alu . is_override = true ;
alu . port_forward = dev - > info - > cpu_ports ;
ksz8_w_sta_mac_table ( dev , 0 , & alu ) ;
return 0 ;
}
2022-06-22 14:34:23 +05:30
int ksz8_setup ( struct dsa_switch * ds )
2019-07-29 19:49:47 +02:00
{
struct ksz_device * dev = ds - > priv ;
2022-06-22 14:34:16 +05:30
int i ;
2019-07-29 19:49:47 +02:00
2022-12-05 06:22:31 +01:00
ds - > mtu_enforcement_ingress = true ;
2022-12-05 06:22:32 +01:00
/* We rely on software untagging on the CPU port, so that we
* can support both tagged and untagged VLANs
*/
ds - > untag_bridge_pvid = true ;
/* VLAN filtering is partly controlled by the global VLAN
* Enable flag
*/
ds - > vlan_filtering_is_global = true ;
2019-07-29 19:49:47 +02:00
ksz_cfg ( dev , S_REPLACE_VID_CTRL , SW_FLOW_CTRL , true ) ;
/* Enable automatic fast aging when link changed detected. */
ksz_cfg ( dev , S_LINK_AGING_CTRL , SW_LINK_AUTO_AGING , true ) ;
/* Enable aggressive back off algorithm in half duplex mode. */
regmap_update_bits ( dev - > regmap [ 0 ] , REG_SW_CTRL_1 ,
SW_AGGR_BACKOFF , SW_AGGR_BACKOFF ) ;
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop .
*/
regmap_update_bits ( dev - > regmap [ 0 ] , REG_SW_CTRL_2 ,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP ,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP ) ;
ksz_cfg ( dev , S_REPLACE_VID_CTRL , SW_REPLACE_VID , false ) ;
ksz_cfg ( dev , S_MIRROR_CTRL , SW_MIRROR_RX_TX , false ) ;
2021-08-10 00:59:28 +02:00
if ( ! ksz_is_ksz88x3 ( dev ) )
ksz_cfg ( dev , REG_SW_CTRL_19 , SW_INS_TAG_ENABLE , true ) ;
2022-05-17 15:13:26 +05:30
for ( i = 0 ; i < ( dev - > info - > num_vlans / 4 ) ; i + + )
2021-04-27 09:09:01 +02:00
ksz8_r_vlan_entries ( dev , i ) ;
2019-07-29 19:49:47 +02:00
2022-03-16 13:55:29 +01:00
return ksz8_handle_global_errata ( ds ) ;
2019-07-29 19:49:47 +02:00
}
2022-06-22 14:34:23 +05:30
void ksz8_get_caps ( struct ksz_device * dev , int port ,
struct phylink_config * config )
2021-06-14 06:31:19 +02:00
{
2022-02-02 10:24:18 +00:00
config - > mac_capabilities = MAC_10 | MAC_100 ;
2021-06-14 06:31:19 +02:00
/* Silicon Errata Sheet (DS80000830A):
* " Port 1 does not respond to received flow control PAUSE frames "
* So , disable Pause support on " Port 1 " ( port = = 0 ) for all ksz88x3
* switches .
*/
if ( ! ksz_is_ksz88x3 ( dev ) | | port )
2022-02-02 10:24:18 +00:00
config - > mac_capabilities | = MAC_SYM_PAUSE ;
2021-06-14 06:31:19 +02:00
/* Asym pause is not supported on KSZ8863 and KSZ8873 */
if ( ! ksz_is_ksz88x3 ( dev ) )
2022-02-02 10:24:18 +00:00
config - > mac_capabilities | = MAC_ASYM_PAUSE ;
2021-06-14 06:31:19 +02:00
}
2022-06-22 14:34:23 +05:30
u32 ksz8_get_port_addr ( int port , int offset )
2019-07-29 19:49:47 +02:00
{
return PORT_CTRL_ADDR ( port , offset ) ;
}
2022-06-22 14:34:23 +05:30
int ksz8_switch_init ( struct ksz_device * dev )
2019-07-29 19:49:47 +02:00
{
2022-05-17 15:13:26 +05:30
dev - > cpu_port = fls ( dev - > info - > cpu_ports ) - 1 ;
dev - > phy_port_cnt = dev - > info - > port_cnt - 1 ;
dev - > port_mask = ( BIT ( dev - > phy_port_cnt ) - 1 ) | dev - > info - > cpu_ports ;
2019-07-29 19:49:47 +02:00
return 0 ;
}

/* Teardown hook: put the hardware back into its reset state. */
void ksz8_switch_exit(struct ksz_device *dev)
{
	ksz8_reset_switch(dev);
}
MODULE_AUTHOR ( " Tristram Ha <Tristram.Ha@microchip.com> " ) ;
MODULE_DESCRIPTION ( " Microchip KSZ8795 Series Switch DSA Driver " ) ;
MODULE_LICENSE ( " GPL " ) ;