/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include "bgmac.h"

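/* Polls @reg every 10 us until (val & mask) == value, so @timeout is in
 * microseconds. A minimal usage sketch (SOME_BIT is a made-up register bit,
 * for illustration only):
 *
 *	if (!bgmac_wait_value(bgmac, reg, SOME_BIT, 0, 1000))
 *		return -ETIMEDOUT;
 */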
static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

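/* TX ring bookkeeping: ring->start and ring->end are free-running counters
 * and the real slot is (counter % BGMAC_TX_RING_SLOTS). Unsigned wraparound
 * keeps "end - start" correct, e.g. with end = 0x00000002 and
 * start = 0xfffffffe there are still 0x00000002 - 0xfffffffe = 4 slots
 * in flight.
 */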
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point to the first empty slot. We tell
	 * hardware the first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
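/* The hardware reports its current descriptor as a byte offset in
 * BGMAC_DMA_TX_STATUS; after subtracting index_base (non-zero only for
 * unaligned rings) and dividing by sizeof(struct bgmac_dma_desc) (16 bytes
 * here) we get a slot index, e.g. offset 0x40 -> slot 4.
 */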
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

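/* RX buffers are page fragments rather than skbs so that the completion path
 * can hand them to build_skb() without copying. Each fragment holds, at
 * BGMAC_RX_BUF_OFFSET, a struct bgmac_rx_header written by the hardware,
 * followed by the frame itself; the header is pre-poisoned with
 * 0xdead/0xbeef so completions that were never written are detectable.
 */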
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc skb */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does ring support unaligned addressing? */
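/* Probe: write a low, non-zero ring address and read it back. Cores that
 * require 4 KiB-aligned rings drop the low bits, so reading 0xff0 back
 * unchanged indicates the core accepts an arbitrary (unaligned) base.
 */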
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
			dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
			return -ENOTSUPP;
		}
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
						    &ring->dma_base,
						    GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
						    &ring->dma_base,
						    GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

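/* Note the ordering below: aligned rings are enabled before the ring base is
 * programmed, unaligned rings only after it. The reason isn't documented
 * here; the split mirrors how index_base is derived from the ring base for
 * cores with unaligned support.
 */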
static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all
 * if there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				   bool force)
{
	u32 cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = CMD_SW_RESET;
	else
		cmdcfg_sr = CMD_SW_RESET_OLD;

	bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_umac_write(bgmac, UMAC_CMD, new_val);

	bgmac_umac_maskset(bgmac, UMAC_CMD, ~cmdcfg_sr, 0);
	udelay(2);
}

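/* Packs the MAC address into two registers, e.g. 00:11:22:33:44:55 becomes
 * UMAC_MAC0 = 0x00112233 and UMAC_MAC1 = 0x00004455.
 */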
static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_umac_write(bgmac, UMAC_MAC0, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_umac_write(bgmac, UMAC_MAC1, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_umac_cmd_maskset(bgmac, ~0, CMD_PROMISC, true);
	else
		bgmac_umac_cmd_maskset(bgmac, ~CMD_PROMISC, 0, true);
}

#if 0 /* We don't use those regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= CMD_SPEED_10 << CMD_SPEED_SHIFT;
		break;
	case SPEED_100:
		set |= CMD_SPEED_100 << CMD_SPEED_SHIFT;
		break;
	case SPEED_1000:
		set |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
		break;
	case SPEED_2500:
		set |= CMD_SPEED_2500 << CMD_SPEED_SHIFT;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= CMD_HD_EN;

	bgmac_umac_cmd_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
			bgmac_idm_write(bgmac, BCMA_IOCTL,
					bgmac_idm_read(bgmac, BCMA_IOCTL) |
					0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
		}
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
{
	u32 iost;

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;

		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
		bgmac_chip_reset_idm_config(bgmac);

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't say anything about using UMAC_CMD_SR, but in this
	 * routine UMAC_CMD is read _after_ putting the chip in a reset, so
	 * the bit has to be kept until the MAC is taken out of the reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = CMD_SW_RESET;
	else
		cmdcfg_sr = CMD_SW_RESET_OLD;

	bgmac_umac_cmd_maskset(bgmac,
			       ~(CMD_TX_EN |
				 CMD_RX_EN |
				 CMD_RX_PAUSE_IGNORE |
				 CMD_TX_ADDR_INS |
				 CMD_HD_EN |
				 CMD_LCL_LOOP_EN |
				 CMD_CNTL_FRM_EN |
				 CMD_RMT_LOOP_EN |
				 CMD_RX_ERR_DISC |
				 CMD_PRBL_EN |
				 CMD_TX_PAUSE_IGNORE |
				 CMD_PAD_EN |
				 CMD_PAUSE_FWD),
			       CMD_PROMISC |
			       CMD_NO_LEN_CHK |
			       CMD_CNTL_FRM_EN |
			       cmdcfg_sr,
			       false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

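/* The dummy read below flushes the posted MMIO write, so interrupts are
 * really masked by the time the function returns.
 */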
static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = CMD_SW_RESET;
	else
		cmdcfg_sr = CMD_SW_RESET_OLD;

	cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
	bgmac_umac_cmd_maskset(bgmac, ~(CMD_TX_EN | CMD_RX_EN),
			       cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= CMD_TX_EN | CMD_RX_EN;
	bgmac_umac_write(bgmac, UMAC_CMD, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_umac_write(bgmac, UMAC_PAUSE_CTRL, 0x27fff);
	}

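	/* Worked example for the MDP computation below: a 250 MHz backplane
	 * clock gives bp_clk = 250, so mdp = 250 * 128 / 1000 - 3 = 29.
	 */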
	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* Clear any erroneously pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_umac_cmd_maskset(bgmac, ~CMD_RX_PAUSE_IGNORE, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
	else
		bgmac_umac_cmd_maskset(bgmac, ~CMD_LCL_LOOP_EN, 0, false);

	bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

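/* Classic NAPI handshake: the hard IRQ above masks further interrupts and
 * schedules the poller; bgmac_poll() re-enables them only once it completes
 * below its budget.
 */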
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* Specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  net_dev->name, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	ether_addr_copy(net_dev->dev_addr, sa->sa_data);
	bgmac_write_mac_address(bgmac, net_dev->dev_addr);

	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

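/* UMAC_MAX_FRAME_LEN counts the whole frame on the wire, so some slack on
 * top of the L3 MTU is needed for the Ethernet header, an optional VLAN tag
 * and the FCS; the driver uses a fixed 32 bytes for that, both below and in
 * bgmac_chip_init().
 */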
static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + mtu);
	return 0;
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_change_mtu		= bgmac_change_mtu,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}

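/* 64-bit MIB counters are split across two 32-bit registers with the high
 * word at offset + 4. The two halves below are read back to back rather
 * than atomically, so a counter wrapping mid-read can yield a momentarily
 * inconsistent value; for occasional ethtool snapshots this is treated as
 * acceptable.
 */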
static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);

2016-11-04 01:11:00 -04:00
int bgmac_phy_connect_direct ( struct bgmac * bgmac )
2015-03-20 23:14:31 +01:00
{
struct fixed_phy_status fphy_status = {
. link = 1 ,
. speed = SPEED_1000 ,
. duplex = DUPLEX_FULL ,
} ;
struct phy_device * phy_dev ;
int err ;
2019-02-04 11:26:18 +01:00
phy_dev = fixed_phy_register ( PHY_POLL , & fphy_status , NULL ) ;
2015-03-20 23:14:31 +01:00
if ( ! phy_dev | | IS_ERR ( phy_dev ) ) {
2016-07-07 19:08:53 -04:00
dev_err ( bgmac - > dev , " Failed to register fixed PHY device \n " ) ;
2015-03-20 23:14:31 +01:00
return - ENODEV ;
}
err = phy_connect_direct ( bgmac - > net_dev , phy_dev , bgmac_adjust_link ,
PHY_INTERFACE_MODE_MII ) ;
if ( err ) {
2016-07-07 19:08:53 -04:00
dev_err ( bgmac - > dev , " Connecting PHY failed \n " ) ;
2015-03-20 23:14:31 +01:00
return err ;
}
return err ;
}
2016-11-04 01:11:00 -04:00
EXPORT_SYMBOL_GPL ( bgmac_phy_connect_direct ) ;
2013-03-07 01:53:28 +00:00
struct bgmac *bgmac_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;

	/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;

	bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;

	return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);

int bgmac_enet_probe(struct bgmac *bgmac)
{
	struct net_device *net_dev = bgmac->net_dev;
	int err;

	bgmac_chip_intrs_off(bgmac);

	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);
	dev_set_drvdata(bgmac->dev, bgmac);

	if (!is_valid_ether_addr(net_dev->dev_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			net_dev->dev_addr);
		eth_hw_addr_random(net_dev);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 net_dev->dev_addr);
	}

	/* This (reset &) enable is not present in specs or the reference
	 * driver, but Broadcom does it in arch PCI code when enabling the
	 * fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	/* This seems to be fixing IRQ by assigning OOB #6 to the core */
	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
			bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_out;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	/* Omit FCS from max MTU size */
	net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);

void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);

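/* Power management mirrors ndo_open/ndo_stop: suspend tears the datapath
 * down to a clean chip reset (keeping the IRQ requested), and resume
 * rebuilds it via bgmac_dma_init() + bgmac_chip_init().
 */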
int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_suspend);

int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_resume);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");