// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016 John Crispin <john@phrozen.org>
 */

#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
#include <linux/dsa/tag_qca.h>

#include "qca8k.h"

static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
	regaddr >>= 1;
	*r1 = regaddr & 0x1e;

	regaddr >>= 5;
	*r2 = regaddr & 0x7;

	regaddr >>= 3;
	*page = regaddr & 0x3ff;
}
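
/* Worked example (derived from the shifts above): switch register 0x16ac
 * splits into page 0x0b, r2 0x2 and r1 0x16. The MII helpers below then
 * address the switch at PHY address (0x10 | r2) = 0x12, with the low
 * 16 bits of the value at MII register r1 and the high 16 bits at r1 + 1.
 */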

static int
qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
{
	u16 *cached_lo = &priv->mdio_cache.lo;
	struct mii_bus *bus = priv->bus;
	int ret;

	if (lo == *cached_lo)
		return 0;

	ret = bus->write(bus, phy_id, regnum, lo);
	if (ret < 0)
		dev_err_ratelimited(&bus->dev,
				    "failed to write qca8k 32bit lo register\n");

	*cached_lo = lo;
	return 0;
}

static int
qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
{
	u16 *cached_hi = &priv->mdio_cache.hi;
	struct mii_bus *bus = priv->bus;
	int ret;

	if (hi == *cached_hi)
		return 0;

	ret = bus->write(bus, phy_id, regnum, hi);
	if (ret < 0)
		dev_err_ratelimited(&bus->dev,
				    "failed to write qca8k 32bit hi register\n");

	*cached_hi = hi;
	return 0;
}

static int
qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
	int ret;

	ret = bus->read(bus, phy_id, regnum);
	if (ret >= 0) {
		*val = ret;
		ret = bus->read(bus, phy_id, regnum + 1);
		*val |= ret << 16;
	}

	if (ret < 0) {
		dev_err_ratelimited(&bus->dev,
				    "failed to read qca8k 32bit register\n");
		*val = 0;
		return ret;
	}

	return 0;
}

static void
qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
{
	u16 lo, hi;
	int ret;

	lo = val & 0xffff;
	hi = (u16)(val >> 16);

	ret = qca8k_set_lo(priv, phy_id, regnum, lo);
	if (ret >= 0)
		ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
}
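
/* Note: a 32-bit switch register moves over the 16-bit MDIO bus as two
 * half-word writes. qca8k_set_lo()/qca8k_set_hi() skip the bus transaction
 * entirely when the cached half already matches, so a sequence of writes
 * that only changes one half costs a single MDIO transaction.
 */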

static int
qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
	u16 *cached_page = &priv->mdio_cache.page;
	struct mii_bus *bus = priv->bus;
	int ret;

	if (page == *cached_page)
		return 0;

	ret = bus->write(bus, 0x18, 0, page);
	if (ret < 0) {
		dev_err_ratelimited(&bus->dev,
				    "failed to set qca8k page\n");
		return ret;
	}

	*cached_page = page;
	usleep_range(1000, 2000);
	return 0;
}
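
/* Note: the page register lives at the fixed pseudo PHY address 0x18,
 * register 0. Caching the current page makes repeated accesses within the
 * same 512-byte register window free, and the usleep_range() above gives
 * the switch time to latch a new page before the following read/write.
 */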

static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u32 command;
	u8 len, cmd;
	int i;

	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	command = get_unaligned_le32(&mgmt_ethhdr->command);
	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);

	/* Make sure the seq matches the requested packet */
	if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		u32 *val = mgmt_eth_data->data;

		*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);

		/* Get the remaining 12 bytes of data.
		 * The read/write function will extract the requested data.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN) {
			__le32 *data2 = (__le32 *)skb->data;
			int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
					     len - QCA_HDR_MGMT_DATA1_LEN);

			val++;

			for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
				*val = get_unaligned_le32(data2);
				val++;
				data2++;
			}
		}
	}

	complete(&mgmt_eth_data->rw_done);
}

static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	__le32 *data2;
	u32 command;
	u16 hdr;
	int i;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* The max value for the len reg is 15 (0xf), but the switch actually
	 * returns 16 bytes. For some reason the steps are:
	 * 0: nothing
	 * 1-4: first 4 bytes
	 * 5-6: first 12 bytes
	 * 7-15: all 16 bytes
	 */
	if (len == 16)
		real_len = 15;
	else
		real_len = len;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
			      QCA_HDR_MGMT_CHECK_CODE_VAL);

	put_unaligned_le32(command, &mgmt_ethhdr->command);

	if (cmd == MDIO_WRITE)
		put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);

	mgmt_ethhdr->hdr = htons(hdr);

	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
		int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
				     len - QCA_HDR_MGMT_DATA1_LEN);

		val++;

		for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
			put_unaligned_le32(*val, data2);
			data2++;
			val++;
		}
	}

	return skb;
}
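
/* For reference, the management frame built above carries (a sketch derived
 * from the code, not an authoritative format description): the QCA tag "hdr"
 * in network byte order, a little-endian command word holding the
 * address/length/cmd/check-code fields, a sequence number filled in later by
 * qca8k_mdio_header_fill_seq_num(), the first 4 data bytes in mdio_data for
 * a write, and up to 12 more data bytes in data2 followed by zero padding.
 * qca8k_rw_reg_ack_handler() above parses the same layout when the switch
 * echoes the frame back.
 */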

static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u32 seq;

	seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
	put_unaligned_le32(seq, &mgmt_ethhdr->seq);
}

static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if the mgmt_master is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}

static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if the mgmt_master is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}

static int
qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
	u32 val = 0;
	int ret;

	ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
	if (ret)
		return ret;

	val &= ~mask;
	val |= write_val;

	return qca8k_write_eth(priv, reg, &val, sizeof(val));
}
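
/* When the Ethernet mgmt path is unavailable (e.g. no mgmt_master yet),
 * qca8k_read_eth()/qca8k_write_eth() return an error and the regmap
 * callbacks below transparently fall back to the paged MDIO access.
 */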

static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}

static int
qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}

static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
	if (ret < 0)
		goto exit;

	val &= ~mask;
	val |= write_val;
	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end MIB - Port6 range */
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true, /* Locking is handled by qca8k read/write */
	.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
};
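
/* Usage note: higher-level helpers go through this regmap. For example,
 * regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
 * QCA8K_MDIO_MASTER_EN) in qca8k_setup_mdio_bus() below lands in
 * qca8k_regmap_update_bits(), which tries the Ethernet mgmt path first and
 * falls back to paged MDIO.
 */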

static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
			struct sk_buff *read_skb, u32 *val)
{
	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
	bool ack;
	int ret;

	if (!skb)
		return -ENOMEM;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the copy pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	*val = mgmt_eth_data->data[0];

	return 0;
}

static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
		      int regnum, u16 data)
{
	struct sk_buff *write_skb, *clear_skb, *read_skb;
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	u32 write_val, clear_val = 0, val;
	struct net_device *mgmt_master;
	int ret, ret1;
	bool ack;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	mgmt_eth_data = &priv->mgmt_eth_data;

	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	if (read) {
		write_val |= QCA8K_MDIO_MASTER_READ;
	} else {
		write_val |= QCA8K_MDIO_MASTER_WRITE;
		write_val |= QCA8K_MDIO_MASTER_DATA(data);
	}

	/* Prealloc all the needed skb before the lock */
	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
	if (!write_skb)
		return -ENOMEM;

	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!clear_skb) {
		ret = -ENOMEM;
		goto err_clear_skb;
	}

	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!read_skb) {
		ret = -ENOMEM;
		goto err_read_skb;
	}

	/* Actually start the request:
	 * 1. Send mdio master packet
	 * 2. Busy wait for mdio master command
	 * 3. Get the data if we are reading
	 * 4. Reset the mdio master (even with error)
	 */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if mgmt_master is operational */
	mgmt_master = priv->mgmt_master;
	if (!mgmt_master) {
		mutex_unlock(&mgmt_eth_data->mutex);
		ret = -EINVAL;
		goto err_mgmt_master;
	}

	read_skb->dev = mgmt_master;
	clear_skb->dev = mgmt_master;
	write_skb->dev = mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the write pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(write_skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0) {
		ret = -ETIMEDOUT;
		kfree_skb(read_skb);
		goto exit;
	}

	if (!ack) {
		ret = -EINVAL;
		kfree_skb(read_skb);
		goto exit;
	}

	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				mgmt_eth_data, read_skb, &val);

	if (ret < 0 && ret1 < 0) {
		ret = ret1;
		goto exit;
	}

	if (read) {
		reinit_completion(&mgmt_eth_data->rw_done);

		/* Increment seq_num and set it in the read pkt */
		mgmt_eth_data->seq++;
		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
		mgmt_eth_data->ack = false;

		dev_queue_xmit(read_skb);

		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						  QCA8K_ETHERNET_TIMEOUT);

		ack = mgmt_eth_data->ack;

		if (ret <= 0) {
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!ack) {
			ret = -EINVAL;
			goto exit;
		}

		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
	} else {
		kfree_skb(read_skb);
	}
exit:
	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(clear_skb);

	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
				    QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);

	return ret;

	/* Error handling before lock */
err_mgmt_master:
	kfree_skb(read_skb);
err_read_skb:
	kfree_skb(clear_skb);
err_clear_skb:
	kfree_skb(write_skb);

	return ret;
}

static u32
qca8k_port_to_phy(int port)
{
	/* From Andrew Lunn:
	 * Port 0 has no internal PHY.
	 * Port 1 has an internal PHY at MDIO address 0.
	 * Port 2 has an internal PHY at MDIO address 1.
	 * ...
	 * Port 5 has an internal PHY at MDIO address 4.
	 * Port 6 has no internal PHY.
	 */
	return port - 1;
}

static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1, &val);

	/* Check if qca8k_read has failed for a different reason
	 * before returning -ETIMEDOUT
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
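
/* Note on read_poll_timeout() here: ret is -ETIMEDOUT when the polled bit
 * never cleared, while ret1 holds the last qca8k_mii_read32() result.
 * Returning ret1 when both are negative surfaces the underlying bus error
 * instead of masking it as a timeout.
 */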

static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait times out, try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}

static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait times out, try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}

static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
	struct qca8k_priv *priv = slave_bus->priv;
	int ret;

	/* Use mdio Ethernet when available, fall back to the legacy one on error */
	ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
	if (!ret)
		return 0;

	return qca8k_mdio_write(priv, phy, regnum, data);
}

static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
	struct qca8k_priv *priv = slave_bus->priv;
	int ret;

	/* Use mdio Ethernet when available, fall back to the legacy one on error */
	ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
	if (ret >= 0)
		return ret;

	ret = qca8k_mdio_read(priv, phy, regnum);
	if (ret < 0)
		return 0xffff;

	return ret;
}

static int
qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;

	return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
}

static int
qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;

	return qca8k_internal_mdio_read(slave_bus, port, regnum);
}

static int
qca8k_mdio_register(struct qca8k_priv *priv)
{
	struct dsa_switch *ds = priv->ds;
	struct device_node *mdio;
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(ds->dev);
	if (!bus)
		return -ENOMEM;

	bus->priv = (void *)priv;
	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
		 ds->dst->index, ds->index);
	bus->parent = ds->dev;
	bus->phy_mask = ~ds->phys_mii_mask;
	ds->slave_mii_bus = bus;

	/* Check if the devicetree declares the port:phy mapping */
	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
	if (of_device_is_available(mdio)) {
		bus->name = "qca8k slave mii";
		bus->read = qca8k_internal_mdio_read;
		bus->write = qca8k_internal_mdio_write;
		return devm_of_mdiobus_register(priv->dev, bus, mdio);
	}

	/* If a mapping can't be found, the legacy mapping is used,
	 * based on the qca8k_port_to_phy function
	 */
	bus->name = "qca8k-legacy slave mii";
	bus->read = qca8k_legacy_mdio_read;
	bus->write = qca8k_legacy_mdio_write;
	return devm_mdiobus_register(priv->dev, bus);
}

static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		of_get_phy_mode(port, &mode);

		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable of magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */
		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	return qca8k_mdio_register(priv);
}

static int
qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
	u32 mask = 0;
	int ret = 0;

	/* SoC specific settings for ipq8064.
	 * If more devices require this, consider adding
	 * a dedicated binding.
	 */
	if (of_machine_is_compatible("qcom,ipq8064"))
		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;

	/* SoC specific settings for ipq8065 */
	if (of_machine_is_compatible("qcom,ipq8065"))
		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;

	if (mask) {
		ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
				QCA8K_MAC_PWR_RGMII0_1_8V |
				QCA8K_MAC_PWR_RGMII1_1_8V,
				mask);
	}

	return ret;
}

static int qca8k_find_cpu_port(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = ds->priv;

	/* Find the connected cpu port. Valid ports are 0 and 6 */
	if (dsa_is_cpu_port(ds, 0))
		return 0;

	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");

	if (dsa_is_cpu_port(ds, 6))
		return 6;

	return -EINVAL;
}

static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	const struct qca8k_match_data *data = priv->info;
	struct device_node *node = priv->dev->of_node;
	u32 val = 0;
	int ret;

	/* QCA8327 requires the correct package mode to be set.
	 * Its bigger brother QCA8328 has the 172 pin layout.
	 * Should be applied by default but we set this just to make sure.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		/* Set the correct package of 148 pin for QCA8327 */
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;

		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	if (of_property_read_bool(node, "qca,led-open-drain")) {
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain requires qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			 QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			 val);
}

static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* We have 2 CPU ports. Check them */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Skip sgmii parsing for rgmii* mode */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
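
/* Illustrative devicetree fragment for a CPU port handled by the parser
 * above (property names match the parser; the values are hypothetical):
 *
 *	port@0 {
 *		reg = <0>;
 *		phy-mode = "rgmii-id";
 *		tx-internal-delay-ps = <2000>;	// stored as 2 ns
 *		rx-internal-delay-ps = <1000>;	// stored as 1 ns
 *	};
 */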

static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
				      u32 reg)
{
	u32 delay, val = 0;
	int ret;

	/* Delay can be declared in 3 different ways:
	 * mode set to rgmii with the internal-delay standard binding defined,
	 * or the rgmii-id or rgmii-tx/rx phy mode set.
	 * The parse logic sets a delay different than 0 only when one
	 * of the 3 different ways is used. In all other cases delay is
	 * not enabled. With ID or TX/RXID delay is enabled and set
	 * to the default and recommended value.
	 */
	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
		       QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
	}

	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
		       QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
	}

	/* Set RGMII delay based on the selected values */
	ret = qca8k_rmw(priv, reg,
			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
			val);
	if (ret)
		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}

static struct phylink_pcs *
qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
			     phy_interface_t interface)
{
	struct qca8k_priv *priv = ds->priv;
	struct phylink_pcs *pcs = NULL;

	switch (interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		switch (port) {
		case 0:
			pcs = &priv->pcs_port_0.pcs;
			break;
		case 6:
			pcs = &priv->pcs_port_6.pcs;
			break;
		}
		break;
	default:
		break;
	}

	return pcs;
}

static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;

	switch (port) {
	case 0: /* 1st CPU port */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;

		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY, nothing to do */
		return;
	case 6: /* 2nd CPU port / external PHY */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;

		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}

	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);

		/* Configure rgmii delay */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

		/* QCA8337 requires the rgmii rx delay to be set for all ports.
		 * This is enabled through PORT5_PAD_CTRL for all ports,
		 * rather than individual port registers.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}

static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	switch (port) {
	case 0: /* 1st CPU port */
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		break;

	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY */
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
		break;

	case 6: /* 2nd CPU port / external PHY */
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  config->supported_interfaces);
		break;
	}

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000FD;

	config->legacy_pre_march2020 = false;
}

static void
qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
			    phy_interface_t interface)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 0);
}

static void
qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
			  phy_interface_t interface, struct phy_device *phydev,
			  int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct qca8k_priv *priv = ds->priv;
	u32 reg;

	if (phylink_autoneg_inband(mode)) {
		reg = QCA8K_PORT_STATUS_LINK_AUTO;
	} else {
		switch (speed) {
		case SPEED_10:
			reg = QCA8K_PORT_STATUS_SPEED_10;
			break;
		case SPEED_100:
			reg = QCA8K_PORT_STATUS_SPEED_100;
			break;
		case SPEED_1000:
			reg = QCA8K_PORT_STATUS_SPEED_1000;
			break;
		default:
			reg = QCA8K_PORT_STATUS_LINK_AUTO;
			break;
		}

		if (duplex == DUPLEX_FULL)
			reg |= QCA8K_PORT_STATUS_DUPLEX;

		if (rx_pause || dsa_is_cpu_port(ds, port))
			reg |= QCA8K_PORT_STATUS_RXFLOW;

		if (tx_pause || dsa_is_cpu_port(ds, port))
			reg |= QCA8K_PORT_STATUS_TXFLOW;
	}

	reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;

	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
}

static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}

static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
				struct phylink_link_state *state)
{
	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
	int port = pcs_to_qca8k_pcs(pcs)->port;
	u32 reg;
	int ret;

	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
	if (ret < 0) {
		state->link = false;
		return;
	}

	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
	state->an_complete = state->link;
	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
							   DUPLEX_HALF;

	switch (reg & QCA8K_PORT_STATUS_SPEED) {
	case QCA8K_PORT_STATUS_SPEED_10:
		state->speed = SPEED_10;
		break;
	case QCA8K_PORT_STATUS_SPEED_100:
		state->speed = SPEED_100;
		break;
	case QCA8K_PORT_STATUS_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	if (reg & QCA8K_PORT_STATUS_RXFLOW)
		state->pause |= MLO_PAUSE_RX;
	if (reg & QCA8K_PORT_STATUS_TXFLOW)
		state->pause |= MLO_PAUSE_TX;
}

static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			    phy_interface_t interface,
			    const unsigned long *advertising,
			    bool permit_pause_to_mac)
{
	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
	int cpu_port_index, ret, port;
	u32 reg, val;

	port = pcs_to_qca8k_pcs(pcs)->port;
	switch (port) {
	case 0:
		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 6:
		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	/* Enable/disable SerDes auto-negotiation as necessary */
	ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
	if (ret)
		return ret;
	if (phylink_autoneg_inband(mode))
		val &= ~QCA8K_PWS_SERDES_AEN_DIS;
	else
		val |= QCA8K_PWS_SERDES_AEN_DIS;
	qca8k_write(priv, QCA8K_REG_PWS, val);

	/* Configure the SGMII parameters */
	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
	if (ret)
		return ret;

	val |= QCA8K_SGMII_EN_SD;

	if (priv->ports_config.sgmii_enable_pll)
		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
		       QCA8K_SGMII_EN_TX;

	if (dsa_is_cpu_port(priv->ds, port)) {
		/* CPU port, we're talking to the CPU MAC, be a PHY */
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_PHY;
	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_MAC;
	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
	}

	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);

	/* The original code reported port instability, as SGMII also
	 * requires the delay to be set. Apply the advised values here
	 * or take them from DT.
	 */
	if (interface == PHY_INTERFACE_MODE_SGMII)
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

	/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
	 * falling edge is set writing in the PORT0 PAD reg
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327 ||
	    priv->switch_id == QCA8K_ID_QCA8337)
		reg = QCA8K_REG_PORT0_PAD_CTRL;

	val = 0;

	/* SGMII Clock phase configuration */
	if (priv->ports_config.sgmii_rx_clk_falling_edge)
		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;

	if (priv->ports_config.sgmii_tx_clk_falling_edge)
		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;

	if (val)
		ret = qca8k_rmw(priv, reg,
				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
				val);

	return 0;
}

static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}

static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};

static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
			    int port)
{
	qpcs->pcs.ops = &qca8k_pcs_ops;

	/* We don't have interrupts for link changes, so we need to poll */
	qpcs->pcs.poll = true;
	qpcs->priv = priv;
	qpcs->port = port;
}

static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	__le32 *data2;
	u8 port;
	int i;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocasts every port. Ignore other packets and
	 * parse only the requested one.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	data2 = (__le32 *)skb->data;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* First 3 mib are present in the skb head */
		if (i < 3) {
			mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
			continue;
		}

		/* Some mib are 64 bit wide */
		if (mib->size == 2)
			mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
		else
			mib_eth_data->data[i] = get_unaligned_le32(data2);

		data2 += mib->size;
	}

exit:
	/* Complete on receiving all the mib packets */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}

static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Send mib autocast request */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}

static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	/* Communicate to the phy internal driver the switch revision.
	 * Based on the switch revision different values need to be
	 * set to the dbg and mmd reg on the phy.
	 * The first 2 bits are used to communicate the switch revision
	 * to the phy driver.
	 */
	if (port > 0 && port < 6)
		return priv->switch_revision;

	return 0;
}

static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
		       enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_QCA;
}

static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
		    bool operational)
{
	struct dsa_port *dp = master->dsa_ptr;
	struct qca8k_priv *priv = ds->priv;

	/* Ethernet MIB/MDIO is only supported for CPU port 0 */
	if (dp->index != 0)
		return;

	mutex_lock(&priv->mgmt_eth_data.mutex);
	mutex_lock(&priv->mib_eth_data.mutex);

	priv->mgmt_master = operational ? (struct net_device *)master : NULL;

	mutex_unlock(&priv->mib_eth_data.mutex);
	mutex_unlock(&priv->mgmt_eth_data.mutex);
}

static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
				      enum dsa_tag_protocol proto)
{
	struct qca_tagger_data *tagger_data;

	switch (proto) {
	case DSA_TAG_PROTO_QCA:
		tagger_data = ds->tagger_data;

		tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
		tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;

		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
2022-02-17 18:30:40 +00:00
static int
qca8k_setup ( struct dsa_switch * ds )
{
struct qca8k_priv * priv = ( struct qca8k_priv * ) ds - > priv ;
int cpu_port , ret , i ;
u32 mask ;
cpu_port = qca8k_find_cpu_port ( ds ) ;
if ( cpu_port < 0 ) {
dev_err ( priv - > dev , " No cpu port configured in both cpu port0 and port6 " ) ;
return cpu_port ;
}
/* Parse CPU port config to be later used in phy_link mac_config */
ret = qca8k_parse_port_config ( priv ) ;
if ( ret )
return ret ;
ret = qca8k_setup_mdio_bus ( priv ) ;
if ( ret )
return ret ;
ret = qca8k_setup_of_pws_reg ( priv ) ;
if ( ret )
return ret ;
ret = qca8k_setup_mac_pwr_sel ( priv ) ;
if ( ret )
return ret ;
2022-02-17 18:30:51 +00:00
qca8k_setup_pcs ( priv , & priv - > pcs_port_0 , 0 ) ;
qca8k_setup_pcs ( priv , & priv - > pcs_port_6 , 6 ) ;
2022-02-17 18:30:40 +00:00
/* Make sure MAC06 is disabled */
ret = regmap_clear_bits ( priv - > regmap , QCA8K_REG_PORT0_PAD_CTRL ,
QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN ) ;
if ( ret ) {
dev_err ( priv - > dev , " failed disabling MAC06 exchange " ) ;
return ret ;
}
/* Enable CPU Port */
ret = regmap_set_bits ( priv - > regmap , QCA8K_REG_GLOBAL_FW_CTRL0 ,
QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN ) ;
if ( ret ) {
dev_err ( priv - > dev , " failed enabling CPU port " ) ;
return ret ;
}
/* Enable MIB counters */
ret = qca8k_mib_init ( priv ) ;
if ( ret )
dev_warn ( priv - > dev , " mib init failed " ) ;
/* Initial setup of all ports */
for ( i = 0 ; i < QCA8K_NUM_PORTS ; i + + ) {
/* Disable forwarding by default on all ports */
ret = qca8k_rmw ( priv , QCA8K_PORT_LOOKUP_CTRL ( i ) ,
QCA8K_PORT_LOOKUP_MEMBER , 0 ) ;
if ( ret )
return ret ;
/* Enable QCA header mode on all cpu ports */
if ( dsa_is_cpu_port ( ds , i ) ) {
ret = qca8k_write ( priv , QCA8K_REG_PORT_HDR_CTRL ( i ) ,
FIELD_PREP ( QCA8K_PORT_HDR_CTRL_TX_MASK , QCA8K_PORT_HDR_CTRL_ALL ) |
FIELD_PREP ( QCA8K_PORT_HDR_CTRL_RX_MASK , QCA8K_PORT_HDR_CTRL_ALL ) ) ;
if ( ret ) {
dev_err ( priv - > dev , " failed enabling QCA header mode " ) ;
return ret ;
}
}
/* Disable MAC by default on all user ports */
if ( dsa_is_user_port ( ds , i ) )
qca8k_port_set_status ( priv , i , 0 ) ;
}
/* Forward all unknown frames to CPU port for Linux processing
* Notice that in multi - cpu config only one port should be set
* for igmp , unknown , multicast and broadcast packet
*/
ret = qca8k_write ( priv , QCA8K_REG_GLOBAL_FW_CTRL1 ,
FIELD_PREP ( QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK , BIT ( cpu_port ) ) |
FIELD_PREP ( QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK , BIT ( cpu_port ) ) |
FIELD_PREP ( QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK , BIT ( cpu_port ) ) |
FIELD_PREP ( QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK , BIT ( cpu_port ) ) ) ;
if ( ret )
return ret ;

	/* Set up the connection between the CPU port and the user ports,
	 * and apply port-specific switch configuration.
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* CPU port gets connected to all user ports of the switch */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
			if (ret)
				return ret;
		}

		/* Individual user ports get connected to the CPU port only */
		if (dsa_is_user_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER,
					BIT(cpu_port));
			if (ret)
				return ret;

			/* Enable ARP auto-learning by default */
			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
					      QCA8K_PORT_LOOKUP_LEARN);
			if (ret)
				return ret;

			/* For port-based VLANs to work we need to set the
			 * default egress VID
			 */
			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
					QCA8K_EGREES_VLAN_PORT_MASK(i),
					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;

			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;
		}
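
		/* The lookup matrix now isolates user ports from each other:
		 * the CPU port is a member of every user port, while each
		 * user port is a member of the CPU port only. Bridging later
		 * widens the membership as ports join a bridge.
		 */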

		/* Port 5 of the qca8337 has some problems in flood
		 * conditions. The original legacy driver had specific buffer
		 * and priority settings for the different ports, suggested
		 * by the QCA switch team. Add these missing settings to
		 * improve switch stability under load. This problem is
		 * limited to the qca8337; other qca8k switches are not
		 * affected.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337) {
			switch (i) {
			/* The 2 CPU ports and port 5 require a different
			 * priority than any other port.
			 */
			case 0:
			case 5:
			case 6:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
				break;
			default:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
			}
			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);

			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
			       QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			       QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			       QCA8K_PORT_HOL_CTRL1_WRED_EN;
			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
				  mask);
		}
	}

	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}
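
	/* How the thresholds interact: the switch starts asserting pause
	 * frames once global buffer usage crosses the XOFF threshold (496)
	 * and stops once usage falls back under the XON threshold (288),
	 * so XON must stay below XOFF. The unit is assumed to be internal
	 * buffer blocks.
	 */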

	/* Set up our port MTUs to match the power-on defaults */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed applying MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Set the min and max ageing values supported */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Set the max number of LAGs supported */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}

static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol = qca8k_get_tag_protocol,
	.setup = qca8k_setup,
	.get_strings = qca8k_get_strings,
	.get_ethtool_stats = qca8k_get_ethtool_stats,
	.get_sset_count = qca8k_get_sset_count,
	.set_ageing_time = qca8k_set_ageing_time,
	.get_mac_eee = qca8k_get_mac_eee,
	.set_mac_eee = qca8k_set_mac_eee,
	.port_enable = qca8k_port_enable,
	.port_disable = qca8k_port_disable,
	.port_change_mtu = qca8k_port_change_mtu,
	.port_max_mtu = qca8k_port_max_mtu,
	.port_stp_state_set = qca8k_port_stp_state_set,
	.port_bridge_join = qca8k_port_bridge_join,
	.port_bridge_leave = qca8k_port_bridge_leave,
	.port_fast_age = qca8k_port_fast_age,
	.port_fdb_add = qca8k_port_fdb_add,
	.port_fdb_del = qca8k_port_fdb_del,
	.port_fdb_dump = qca8k_port_fdb_dump,
	.port_mdb_add = qca8k_port_mdb_add,
	.port_mdb_del = qca8k_port_mdb_del,
	.port_mirror_add = qca8k_port_mirror_add,
	.port_mirror_del = qca8k_port_mirror_del,
	.port_vlan_filtering = qca8k_port_vlan_filtering,
	.port_vlan_add = qca8k_port_vlan_add,
	.port_vlan_del = qca8k_port_vlan_del,
	.phylink_get_caps = qca8k_phylink_get_caps,
	.phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
	.phylink_mac_config = qca8k_phylink_mac_config,
	.phylink_mac_link_down = qca8k_phylink_mac_link_down,
	.phylink_mac_link_up = qca8k_phylink_mac_link_up,
	.get_phy_flags = qca8k_get_phy_flags,
	.port_lag_join = qca8k_port_lag_join,
	.port_lag_leave = qca8k_port_lag_leave,
	.master_state_change = qca8k_master_change,
	.connect_tag_protocol = qca8k_connect_tag_protocol,
};

static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv;
	int ret;

	/* Allocate the private data struct so that we can probe the
	 * switch's ID register.
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;
	priv->info = of_device_get_match_data(priv->dev);

	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_ASIS);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		gpiod_set_value_cansleep(priv->reset_gpio, 1);
		/* The active-low duration must be greater than 10 ms
		 * and checkpatch.pl wants 20 ms.
		 */
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	priv->mdio_cache.page = 0xffff;
	priv->mdio_cache.lo = 0xffff;
	priv->mdio_cache.hi = 0xffff;
/* Check the detected switch id */
ret = qca8k_read_switch_id ( priv ) ;
if ( ret )
return ret ;
2016-09-15 16:26:41 +02:00
2020-06-03 13:31:39 +02:00
priv - > ds = devm_kzalloc ( & mdiodev - > dev , sizeof ( * priv - > ds ) , GFP_KERNEL ) ;
2016-09-15 16:26:41 +02:00
if ( ! priv - > ds )
return - ENOMEM ;
2022-02-02 01:03:29 +01:00
mutex_init ( & priv - > mgmt_eth_data . mutex ) ;
init_completion ( & priv - > mgmt_eth_data . rw_done ) ;
2022-02-02 01:03:30 +01:00
mutex_init ( & priv - > mib_eth_data . mutex ) ;
init_completion ( & priv - > mib_eth_data . rw_done ) ;
2019-10-21 16:51:30 -04:00
priv - > ds - > dev = & mdiodev - > dev ;
2019-10-24 15:46:58 +02:00
priv - > ds - > num_ports = QCA8K_NUM_PORTS ;
2016-09-15 16:26:41 +02:00
priv - > ds - > priv = priv ;
2022-04-16 01:30:15 +02:00
priv - > ds - > ops = & qca8k_switch_ops ;
2016-09-15 16:26:41 +02:00
mutex_init ( & priv - > reg_mutex ) ;
dev_set_drvdata ( & mdiodev - > dev , priv ) ;
2017-05-26 18:12:51 -04:00
return dsa_register_switch ( priv - > ds ) ;
2016-09-15 16:26:41 +02:00
}

static void
qca8k_sw_remove(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
	int i;

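	/* ->remove and ->shutdown can both run for the same device, and
	 * whichever runs first clears drvdata; if it is already NULL,
	 * ->shutdown has done its part and there is nothing left to do.
	 */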
	if (!priv)
		return;

	for (i = 0; i < QCA8K_NUM_PORTS; i++)
		qca8k_port_set_status(priv, i, 0);

	dsa_unregister_switch(priv->ds);

	dev_set_drvdata(&mdiodev->dev, NULL);
}

static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);

	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);
	dev_set_drvdata(&mdiodev->dev, NULL);
}

#ifdef CONFIG_PM_SLEEP
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
	int port;

	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Do not enable on resume if the port was
		 * disabled before.
		 */
		if (!(priv->port_enabled_map & BIT(port)))
			continue;

		qca8k_port_set_status(priv, port, enable);
	}
}

static int qca8k_suspend(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 0);

	return dsa_switch_suspend(priv->ds);
}

static int qca8k_resume(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 1);

	return dsa_switch_resume(priv->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);

static const struct qca8k_info_ops qca8xxx_ops = {
	.autocast_mib = qca8k_get_ethtool_stats_eth,
	.read_eth = qca8k_read_eth,
	.write_eth = qca8k_write_eth,
};

static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8327", .data = &qca8327 },
	{ .compatible = "qca,qca8328", .data = &qca8328 },
	{ .compatible = "qca,qca8334", .data = &qca833x },
	{ .compatible = "qca,qca8337", .data = &qca833x },
	{ /* sentinel */ },
};
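
/* A minimal (hypothetical) device tree node that would bind against this
 * match table; the MDIO address and GPIO line are made up for illustration:
 *
 *	switch@10 {
 *		compatible = "qca,qca8337";
 *		reg = <0x10>;
 *		reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>;
 *	};
 */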

static struct mdio_driver qca8kmdio_driver = {
	.probe = qca8k_sw_probe,
	.remove = qca8k_sw_remove,
	.shutdown = qca8k_sw_shutdown,
	.mdiodrv.driver = {
		.name = "qca8k",
		.of_match_table = qca8k_of_match,
		.pm = &qca8k_pm_ops,
	},
};

mdio_module_driver(qca8kmdio_driver);

MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");