/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>

#include "fec.h"
static void set_multicast_list(struct net_device *ndev);

#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

#define DRIVER_NAME	"fec"
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
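/* When flow control is enabled, fec_restart() programs these values into the
 * receive FIFO threshold registers (FEC_R_FIFO_RSEM/RSFL/RAEM/RAFL) and the
 * opcode/pause duration register (FEC_OPD).
 */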
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC		(1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
/* Controller uses gasket */
#define FEC_QUIRK_USE_GASKET		(1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT		(1 << 3)
/* Controller has extended descriptor buffer */
#define FEC_QUIRK_HAS_BUFDESC_EX	(1 << 4)
/* Controller has hardware checksum support */
#define FEC_QUIRK_HAS_CSUM		(1 << 5)
/* Controller has hardware vlan support */
#define FEC_QUIRK_HAS_VLAN		(1 << 6)
/* ENET IP errata ERR006358
 *
 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
 * detected as not set during a prior frame transmission, then the
 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
 * frames not being transmitted until there is a 0-to-1 transition on
 * ENET_TDAR[TDAR].
 */
#define FEC_QUIRK_ERR006358		(1 << 7)
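/* This driver works around ERR006358 in fec_enet_start_xmit(): on affected
 * controllers, if the previous descriptor's READY bit was seen cleared,
 * delayed work is scheduled to write FEC_X_DES_ACTIVE once more (see
 * fec_enet_work()).
 */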
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET,
	}, {
		.name = "imx27-fec",
		.driver_data = 0,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif

/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
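/* For example, fec_enet_mdio_read() below starts a read of register 2 on the
 * PHY at address 1 by writing
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(1) | FEC_MMFR_RA(2) |
 *	FEC_MMFR_TA
 * to FEC_MII_DATA, then extracts the result with FEC_MMFR_DATA() once the
 * MII interrupt signals completion.
 */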

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
static int mii_cnt;

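/* Step to the next descriptor in whichever ring (RX or TX) bdp belongs to,
 * accounting for the larger bufdesc_ex layout when extended descriptors are
 * in use, and wrapping back to the ring base after the last entry.
 * fec_enet_get_prevdesc() below walks the ring in the opposite direction.
 */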
static inline
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
	struct bufdesc *new_bd = bdp + 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= fep->tx_bd_base) {
		base = fep->tx_bd_base;
		ring_size = fep->tx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
	} else {
		base = fep->rx_bd_base;
		ring_size = fep->rx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
			ex_base : ex_new_bd);
	else
		return (new_bd >= (base + ring_size)) ?
			base : new_bd;
}
static inline
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
	struct bufdesc *new_bd = bdp - 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= fep->tx_bd_base) {
		base = fep->tx_bd_base;
		ring_size = fep->tx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
	} else {
		base = fep->rx_bd_base;
		ring_size = fep->rx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd < ex_base) ?
			(ex_new_bd + ring_size) : ex_new_bd);
	else
		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}

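/* Byte-swap each 32-bit word of a frame in place; used on controllers with
 * the FEC_QUIRK_SWAP_FRAME quirk, whose view of frame data does not match the
 * CPU's endianness.
 */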
static void *swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
		*buf = cpu_to_be32(*buf);

	return bufaddr;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp, *bdp_pre;
	void *bufaddr;
	unsigned short status;
	unsigned int index;

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since ndev->tbusy should be set.
		 */
		netdev_err(ndev, "tx queue full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (fep->bufdesc_ex)
		index = (struct bufdesc_ex *)bdp -
			(struct bufdesc_ex *)fep->tx_bd_base;
	else
		index = bdp - fep->tx_bd_base;

	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/*
	 * Some design made an incorrect assumption on endian mode of
	 * the system that it's running on. As a result, the driver has to
	 * swap every frame going to and coming from the controller.
	 */
	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
		swap_buffer(bufaddr, skb->len);

	/* Save skb pointer */
	fep->tx_skbuff[index] = skb;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
					  skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
		bdp->cbd_bufaddr = 0;
		fep->tx_skbuff[index] = NULL;
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
		ebdp->cbd_bdu = 0;
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en)) {
			ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		} else {
			ebdp->cbd_esc = BD_ENET_TX_INT;

			/* Enable protocol checksum flags
			 * We do not bother with the IP Checksum bits as they
			 * are done by the kernel
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				ebdp->cbd_esc |= BD_ENET_TX_PINS;
		}
	}

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
		fep->delay_work.trig_tx = true;
		schedule_delayed_work(&(fep->delay_work.delay_work),
					msecs_to_jiffies(1));
	}

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(bdp, fep);

	skb_tx_timestamp(skb);

	fep->cur_tx = bdp;

	if (fep->cur_tx == fep->dirty_tx)
		netif_stop_queue(ndev);

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	return NETDEV_TX_OK;
}
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned int i;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < fep->rx_ring_size; i++) {

		/* Initialize the BD for every fragment in the page. */
		if (bdp->cbd_bufaddr)
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
		else
			bdp->cbd_sc = 0;
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap */
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;

	fep->cur_rx = fep->rx_bd_base;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	fep->cur_tx = bdp;
	for (i = 0; i < fep->tx_ring_size; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
		bdp->cbd_bufaddr = 0;
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap */
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;
	fep->dirty_tx = bdp;
}
/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *ndev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int i;
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		napi_disable(&fep->napi);
		netif_stop_queue(ndev);
		netif_tx_lock_bh(ndev);
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	fec_enet_bd_init(ndev);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	if (fep->bufdesc_ex)
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
	else
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);


	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	/* set RX checksum */
	val = readl(fep->hwp + FEC_RACC);
	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
		val |= FEC_RACC_OPTIONS;
	else
		val &= ~FEC_RACC_OPTIONS;
	writel(val, fep->hwp + FEC_RACC);
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (fep->phy_dev) {
			if (fep->phy_dev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (fep->phy_dev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     fep->phy_dev && fep->phy_dev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	if (netif_running(ndev)) {
		netif_tx_unlock_bh(ndev);
		netif_wake_queue(ndev);
		napi_enable(&fep->napi);
		netif_device_attach(ndev);
	}
}
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}


static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	ndev->stats.tx_errors++;

	fep->delay_work.timeout = true;
	schedule_delayed_work(&(fep->delay_work.delay_work), 0);
}

static void fec_enet_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work,
			     struct fec_enet_private,
			     delay_work.delay_work.work);

	if (fep->delay_work.timeout) {
		fep->delay_work.timeout = false;
		fec_restart(fep->netdev, fep->full_duplex);
		netif_wake_queue(fep->netdev);
	}

	if (fep->delay_work.trig_tx) {
		fep->delay_work.trig_tx = false;
		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
	}
}
static void
fec_enet_tx(struct net_device *ndev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	int	index = 0;

	fep = netdev_priv(ndev);
	bdp = fep->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, fep);

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

		/* current queue is empty */
		if (bdp == fep->cur_tx)
			break;

		if (fep->bufdesc_ex)
			index = (struct bufdesc_ex *)bdp -
				(struct bufdesc_ex *)fep->tx_bd_base;
		else
			index = bdp - fep->tx_bd_base;

		skb = fep->tx_skbuff[index];
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
				DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += bdp->cbd_datlen;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			unsigned long flags;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			shhwtstamps.hwtstamp = ns_to_ktime(
				timecounter_cyc2time(&fep->tc, ebdp->ts));
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		if (status & BD_ENET_TX_READY)
			netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[index] = NULL;

		fep->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, fep);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->dirty_tx != fep->cur_tx) {
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	return;
}
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;
	struct	bufdesc_ex *ebdp = NULL;
	bool	vlan_packet_rcvd = false;
	u16	vlan_tag;
	int	index = 0;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			netdev_err(ndev, "rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;

		if (fep->bufdesc_ex)
			index = (struct bufdesc_ex *)bdp -
				(struct bufdesc_ex *)fep->rx_bd_base;
		else
			index = bdp - fep->rx_bd_base;
		data = fep->rx_skbuff[index]->data;
		dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
			pkt_len -= VLAN_HLEN;

			vlan_packet_rcvd = true;
		}

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			ndev->stats.rx_dropped++;
		} else {
			int payload_offset = (2 * ETH_ALEN);
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */

			/* Extract the frame data without the VLAN header. */
			skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
			if (vlan_packet_rcvd)
				payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
			skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
						       data + payload_offset,
						       pkt_len - 4 - (2 * ETH_ALEN));

			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (fep->bufdesc_ex &&
			    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
				if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
					/* don't check it */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				} else {
					skb_checksum_none_assert(skb);
				}
			}

			/* Handle received VLAN packets */
			if (vlan_packet_rcvd)
				__vlan_hwaccel_put_tag(skb,
						       htons(ETH_P_8021Q),
						       vlan_tag);

			napi_gro_receive(&fep->napi, skb);
		}

		dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, fep);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	return pkt_received;
}
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
			ret = IRQ_HANDLED;

			/* Disable the RX interrupt */
			if (napi_schedule_prep(&fep->napi)) {
				writel(FEC_RX_DISABLED_IMASK,
					fep->hwp + FEC_IMASK);
				__napi_schedule(&fep->napi);
			}
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	int pkts = fec_enet_rx(ndev, budget);
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}

/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = fep->phy_dev;
	int status_change = 0;

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		return;
	}

	if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex)
			status_change = 1;

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change)
			fec_restart(ndev, phy_dev->duplex);
	} else {
		if (fep->link) {
			fec_stop(ndev);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
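
/* Enable or disable the AHB, IPG and (when present) ENET_OUT and PTP clocks
 * as a group, unwinding any clocks already enabled if a later
 * clk_prepare_enable() call fails.
 */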
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;
		ret = clk_prepare_enable(fep->clk_ipg);
		if (ret)
			goto failed_clk_ipg;
		if (fep->clk_enet_out) {
			ret = clk_prepare_enable(fep->clk_enet_out);
			if (ret)
				goto failed_clk_enet_out;
		}
		if (fep->clk_ptp) {
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret)
				goto failed_clk_ptp;
		}
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
		if (fep->clk_enet_out)
			clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp)
			clk_disable_unprepare(fep->clk_ptp);
	}

	return 0;

failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	fep->phy_dev = NULL;

	/* check for attached phy */
	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			continue;
		if (fep->mii_bus->phy_map[phy_id] == NULL)
			continue;
		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			continue;
		if (dev_id--)
			continue;
		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
		break;
	}

	if (phy_id >= PHY_MAX_ADDR) {
		netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
		phy_id = 0;
	}

	snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
			      fep->phy_interface);
	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		    fep->phy_dev->irq);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int err = -ENXIO, i;

	/*
	 * The dual fec interfaces are not equivalent with enet-mac.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
	 * Reference Manual has an error on this, which is fixed in the
	 * i.MX6Q documentation.
	 */
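	/* For example, on ENET-MAC with a 100 MHz reference clock:
	 * DIV_ROUND_UP(100000000, 5000000) = 20, decremented to 19 and then
	 * shifted left by one (the MII_SPEED field starts at bit 1 of the
	 * register), giving MDC = 100 MHz / ((19 + 1) x 2) = 2.5 MHz.
	 */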
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
		fep->phy_speed--;
	fep->phy_speed <<= 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	mii_cnt++;

	/* save fec0 mii_bus */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}
2010-03-31 02:10:44 +00:00
static void fec_enet_mii_remove ( struct fec_enet_private * fep )
2005-04-16 15:20:36 -07:00
{
2011-12-07 21:59:31 +00:00
if ( - - mii_cnt = = 0 ) {
mdiobus_unregister ( fep - > mii_bus ) ;
kfree ( fep - > mii_bus - > irq ) ;
mdiobus_free ( fep - > mii_bus ) ;
}
2005-04-16 15:20:36 -07:00
}
2011-01-19 11:58:12 +01:00
static int fec_enet_get_settings ( struct net_device * ndev ,
2010-03-31 02:10:44 +00:00
struct ethtool_cmd * cmd )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2010-03-31 02:10:44 +00:00
struct phy_device * phydev = fep - > phy_dev ;
2005-04-16 15:20:36 -07:00
2010-03-31 02:10:44 +00:00
if ( ! phydev )
return - ENODEV ;
2005-04-16 15:20:36 -07:00
2010-03-31 02:10:44 +00:00
return phy_ethtool_gset ( phydev , cmd ) ;
2005-04-16 15:20:36 -07:00
}
2011-01-19 11:58:12 +01:00
static int fec_enet_set_settings ( struct net_device * ndev ,
2010-03-31 02:10:44 +00:00
struct ethtool_cmd * cmd )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2010-03-31 02:10:44 +00:00
struct phy_device * phydev = fep - > phy_dev ;
2005-04-16 15:20:36 -07:00
2010-03-31 02:10:44 +00:00
if ( ! phydev )
return - ENODEV ;
2005-04-16 15:20:36 -07:00
2010-03-31 02:10:44 +00:00
return phy_ethtool_sset ( phydev , cmd ) ;
2005-04-16 15:20:36 -07:00
}
2011-01-19 11:58:12 +01:00
static void fec_enet_get_drvinfo ( struct net_device * ndev ,
2010-03-31 02:10:44 +00:00
struct ethtool_drvinfo * info )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2006-09-13 13:24:59 -04:00
2013-01-06 00:44:26 +00:00
strlcpy(info->driver, fep->pdev->dev.driver->name,
	sizeof(info->driver));
strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2005-04-16 15:20:36 -07:00
}
2013-01-06 16:25:07 +00:00
static int fec_enet_get_ts_info ( struct net_device * ndev ,
struct ethtool_ts_info * info )
{
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
if ( fep - > bufdesc_ex ) {
info - > so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE ;
if ( fep - > ptp_clock )
info - > phc_index = ptp_clock_index ( fep - > ptp_clock ) ;
else
info - > phc_index = - 1 ;
info - > tx_types = ( 1 < < HWTSTAMP_TX_OFF ) |
( 1 < < HWTSTAMP_TX_ON ) ;
info - > rx_filters = ( 1 < < HWTSTAMP_FILTER_NONE ) |
( 1 < < HWTSTAMP_FILTER_ALL ) ;
return 0 ;
} else {
return ethtool_op_get_ts_info ( ndev , info ) ;
}
}
2013-06-18 10:04:59 -07:00
# if !defined(CONFIG_M5272)
2013-01-16 16:55:58 +00:00
static void fec_enet_get_pauseparam ( struct net_device * ndev ,
struct ethtool_pauseparam * pause )
{
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
pause - > autoneg = ( fep - > pause_flag & FEC_PAUSE_FLAG_AUTONEG ) ! = 0 ;
pause - > tx_pause = ( fep - > pause_flag & FEC_PAUSE_FLAG_ENABLE ) ! = 0 ;
pause - > rx_pause = pause - > tx_pause ;
}
static int fec_enet_set_pauseparam ( struct net_device * ndev ,
struct ethtool_pauseparam * pause )
{
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
if (pause->tx_pause != pause->rx_pause) {
	netdev_info(ndev,
		"hardware only supports enabling/disabling both tx and rx");
	return -EINVAL;
}
fep - > pause_flag = 0 ;
/* tx pause must be same as rx pause */
fep - > pause_flag | = pause - > rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0 ;
fep - > pause_flag | = pause - > autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0 ;
if ( pause - > rx_pause | | pause - > autoneg ) {
fep - > phy_dev - > supported | = ADVERTISED_Pause ;
fep - > phy_dev - > advertising | = ADVERTISED_Pause ;
} else {
fep - > phy_dev - > supported & = ~ ADVERTISED_Pause ;
fep - > phy_dev - > advertising & = ~ ADVERTISED_Pause ;
}
if ( pause - > autoneg ) {
if ( netif_running ( ndev ) )
fec_stop ( ndev ) ;
phy_start_aneg ( fep - > phy_dev ) ;
}
if ( netif_running ( ndev ) )
fec_restart ( ndev , 0 ) ;
return 0 ;
}
2013-06-25 23:18:52 -07:00
static const struct fec_stat {
char name [ ETH_GSTRING_LEN ] ;
u16 offset ;
} fec_stats [ ] = {
/* RMON TX */
{ " tx_dropped " , RMON_T_DROP } ,
{ " tx_packets " , RMON_T_PACKETS } ,
{ " tx_broadcast " , RMON_T_BC_PKT } ,
{ " tx_multicast " , RMON_T_MC_PKT } ,
{ " tx_crc_errors " , RMON_T_CRC_ALIGN } ,
{ " tx_undersize " , RMON_T_UNDERSIZE } ,
{ " tx_oversize " , RMON_T_OVERSIZE } ,
{ " tx_fragment " , RMON_T_FRAG } ,
{ " tx_jabber " , RMON_T_JAB } ,
{ " tx_collision " , RMON_T_COL } ,
{ " tx_64byte " , RMON_T_P64 } ,
{ " tx_65to127byte " , RMON_T_P65TO127 } ,
{ " tx_128to255byte " , RMON_T_P128TO255 } ,
{ " tx_256to511byte " , RMON_T_P256TO511 } ,
{ " tx_512to1023byte " , RMON_T_P512TO1023 } ,
{ " tx_1024to2047byte " , RMON_T_P1024TO2047 } ,
{ " tx_GTE2048byte " , RMON_T_P_GTE2048 } ,
{ " tx_octets " , RMON_T_OCTETS } ,
/* IEEE TX */
{ " IEEE_tx_drop " , IEEE_T_DROP } ,
{ " IEEE_tx_frame_ok " , IEEE_T_FRAME_OK } ,
{ " IEEE_tx_1col " , IEEE_T_1COL } ,
{ " IEEE_tx_mcol " , IEEE_T_MCOL } ,
{ " IEEE_tx_def " , IEEE_T_DEF } ,
{ " IEEE_tx_lcol " , IEEE_T_LCOL } ,
{ " IEEE_tx_excol " , IEEE_T_EXCOL } ,
{ " IEEE_tx_macerr " , IEEE_T_MACERR } ,
{ " IEEE_tx_cserr " , IEEE_T_CSERR } ,
{ " IEEE_tx_sqe " , IEEE_T_SQE } ,
{ " IEEE_tx_fdxfc " , IEEE_T_FDXFC } ,
{ " IEEE_tx_octets_ok " , IEEE_T_OCTETS_OK } ,
/* RMON RX */
{ " rx_packets " , RMON_R_PACKETS } ,
{ " rx_broadcast " , RMON_R_BC_PKT } ,
{ " rx_multicast " , RMON_R_MC_PKT } ,
{ " rx_crc_errors " , RMON_R_CRC_ALIGN } ,
{ " rx_undersize " , RMON_R_UNDERSIZE } ,
{ " rx_oversize " , RMON_R_OVERSIZE } ,
{ " rx_fragment " , RMON_R_FRAG } ,
{ " rx_jabber " , RMON_R_JAB } ,
{ " rx_64byte " , RMON_R_P64 } ,
{ " rx_65to127byte " , RMON_R_P65TO127 } ,
{ " rx_128to255byte " , RMON_R_P128TO255 } ,
{ " rx_256to511byte " , RMON_R_P256TO511 } ,
{ " rx_512to1023byte " , RMON_R_P512TO1023 } ,
{ " rx_1024to2047byte " , RMON_R_P1024TO2047 } ,
{ " rx_GTE2048byte " , RMON_R_P_GTE2048 } ,
{ " rx_octets " , RMON_R_OCTETS } ,
/* IEEE RX */
{ " IEEE_rx_drop " , IEEE_R_DROP } ,
{ " IEEE_rx_frame_ok " , IEEE_R_FRAME_OK } ,
{ " IEEE_rx_crc " , IEEE_R_CRC } ,
{ " IEEE_rx_align " , IEEE_R_ALIGN } ,
{ " IEEE_rx_macerr " , IEEE_R_MACERR } ,
{ " IEEE_rx_fdxfc " , IEEE_R_FDXFC } ,
{ " IEEE_rx_octets_ok " , IEEE_R_OCTETS_OK } ,
} ;
static void fec_enet_get_ethtool_stats ( struct net_device * dev ,
struct ethtool_stats * stats , u64 * data )
{
struct fec_enet_private * fep = netdev_priv ( dev ) ;
int i ;
for ( i = 0 ; i < ARRAY_SIZE ( fec_stats ) ; i + + )
data [ i ] = readl ( fep - > hwp + fec_stats [ i ] . offset ) ;
}
static void fec_enet_get_strings ( struct net_device * netdev ,
u32 stringset , u8 * data )
{
int i ;
switch ( stringset ) {
case ETH_SS_STATS :
for ( i = 0 ; i < ARRAY_SIZE ( fec_stats ) ; i + + )
memcpy ( data + i * ETH_GSTRING_LEN ,
fec_stats [ i ] . name , ETH_GSTRING_LEN ) ;
break ;
}
}
static int fec_enet_get_sset_count ( struct net_device * dev , int sset )
{
switch ( sset ) {
case ETH_SS_STATS :
return ARRAY_SIZE ( fec_stats ) ;
default :
return - EOPNOTSUPP ;
}
}
2013-06-18 10:04:59 -07:00
# endif /* !defined(CONFIG_M5272) */
2013-06-25 23:18:52 -07:00
2013-06-17 07:25:06 -07:00
static int fec_enet_nway_reset ( struct net_device * dev )
{
struct fec_enet_private * fep = netdev_priv ( dev ) ;
struct phy_device * phydev = fep - > phy_dev ;
if ( ! phydev )
return - ENODEV ;
return genphy_restart_aneg ( phydev ) ;
}
2012-01-04 12:59:49 +00:00
static const struct ethtool_ops fec_enet_ethtool_ops = {
2013-06-18 10:04:59 -07:00
# if !defined(CONFIG_M5272)
2013-01-16 16:55:58 +00:00
. get_pauseparam = fec_enet_get_pauseparam ,
. set_pauseparam = fec_enet_set_pauseparam ,
2013-06-18 10:04:59 -07:00
# endif
2010-03-31 02:10:44 +00:00
. get_settings = fec_enet_get_settings ,
. set_settings = fec_enet_set_settings ,
. get_drvinfo = fec_enet_get_drvinfo ,
. get_link = ethtool_op_get_link ,
2013-01-06 16:25:07 +00:00
. get_ts_info = fec_enet_get_ts_info ,
2013-06-17 07:25:06 -07:00
. nway_reset = fec_enet_nway_reset ,
2013-06-25 23:18:52 -07:00
# ifndef CONFIG_M5272
. get_ethtool_stats = fec_enet_get_ethtool_stats ,
. get_strings = fec_enet_get_strings ,
. get_sset_count = fec_enet_get_sset_count ,
# endif
2010-03-31 02:10:44 +00:00
} ;
2005-04-16 15:20:36 -07:00
2011-01-19 11:58:12 +01:00
static int fec_enet_ioctl ( struct net_device * ndev , struct ifreq * rq , int cmd )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2010-03-31 02:10:44 +00:00
struct phy_device * phydev = fep - > phy_dev ;
2005-04-16 15:20:36 -07:00
2011-01-19 11:58:12 +01:00
if ( ! netif_running ( ndev ) )
2010-03-31 02:10:44 +00:00
return - EINVAL ;
2005-04-16 15:20:36 -07:00
2010-03-31 02:10:44 +00:00
if ( ! phydev )
return - ENODEV ;
2013-11-18 23:02:44 +00:00
if ( fep - > bufdesc_ex ) {
if ( cmd = = SIOCSHWTSTAMP )
return fec_ptp_set ( ndev , rq ) ;
if ( cmd = = SIOCGHWTSTAMP )
return fec_ptp_get ( ndev , rq ) ;
}
2013-01-03 16:04:23 +00:00
2010-07-17 08:48:55 +00:00
return phy_mii_ioctl ( phydev , rq , cmd ) ;
2005-04-16 15:20:36 -07:00
}
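/*
 * Illustrative user-space sketch (not part of the driver): when bufdesc_ex
 * is set, the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls above are serviced by
 * fec_ptp_set()/fec_ptp_get().  An application would typically request
 * hardware timestamping roughly as below.  The interface name "eth0" is an
 * assumption for this example.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* timestamp transmitted packets */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* timestamp all received packets */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("hw timestamping on, tx_type=%d rx_filter=%d\n",
		       cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}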
2011-01-19 11:58:12 +01:00
static void fec_enet_free_buffers ( struct net_device * ndev )
2009-04-15 01:32:24 +00:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2013-03-20 12:31:07 -03:00
unsigned int i ;
2009-04-15 01:32:24 +00:00
struct sk_buff * skb ;
struct bufdesc * bdp ;
bdp = fep - > rx_bd_base ;
2013-09-03 10:41:18 +08:00
for ( i = 0 ; i < fep - > rx_ring_size ; i + + ) {
2009-04-15 01:32:24 +00:00
skb = fep - > rx_skbuff [ i ] ;
if ( bdp - > cbd_bufaddr )
2011-01-20 09:26:38 +01:00
dma_unmap_single ( & fep - > pdev - > dev , bdp - > cbd_bufaddr ,
2009-04-15 01:32:24 +00:00
FEC_ENET_RX_FRSIZE , DMA_FROM_DEVICE ) ;
if ( skb )
dev_kfree_skb ( skb ) ;
2013-09-03 10:41:18 +08:00
bdp = fec_enet_get_nextdesc ( bdp , fep ) ;
2009-04-15 01:32:24 +00:00
}
bdp = fep - > tx_bd_base ;
2013-09-03 10:41:18 +08:00
for ( i = 0 ; i < fep - > tx_ring_size ; i + + )
2009-04-15 01:32:24 +00:00
kfree ( fep - > tx_bounce [ i ] ) ;
}
2011-01-19 11:58:12 +01:00
static int fec_enet_alloc_buffers ( struct net_device * ndev )
2009-04-15 01:32:24 +00:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2013-03-20 12:31:07 -03:00
unsigned int i ;
2009-04-15 01:32:24 +00:00
struct sk_buff * skb ;
struct bufdesc * bdp ;
bdp = fep - > rx_bd_base ;
2013-09-03 10:41:18 +08:00
for ( i = 0 ; i < fep - > rx_ring_size ; i + + ) {
2012-02-07 08:27:31 +00:00
skb = netdev_alloc_skb ( ndev , FEC_ENET_RX_FRSIZE ) ;
2009-04-15 01:32:24 +00:00
if ( ! skb ) {
2011-01-19 11:58:12 +01:00
fec_enet_free_buffers ( ndev ) ;
2009-04-15 01:32:24 +00:00
return - ENOMEM ;
}
fep - > rx_skbuff [ i ] = skb ;
2011-01-20 09:26:38 +01:00
bdp - > cbd_bufaddr = dma_map_single ( & fep - > pdev - > dev , skb - > data ,
2009-04-15 01:32:24 +00:00
FEC_ENET_RX_FRSIZE , DMA_FROM_DEVICE ) ;
2013-11-14 09:57:10 +08:00
if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
	fec_enet_free_buffers(ndev);
	if (net_ratelimit())
		netdev_err(ndev, "Rx DMA memory map failed\n");
	return -ENOMEM;
}
2009-04-15 01:32:24 +00:00
bdp - > cbd_sc = BD_ENET_RX_EMPTY ;
2013-01-03 16:04:23 +00:00
if ( fep - > bufdesc_ex ) {
struct bufdesc_ex * ebdp = ( struct bufdesc_ex * ) bdp ;
ebdp - > cbd_esc = BD_ENET_RX_INT ;
}
2013-09-03 10:41:18 +08:00
bdp = fec_enet_get_nextdesc ( bdp , fep ) ;
2009-04-15 01:32:24 +00:00
}
/* Set the last buffer to wrap. */
2013-09-03 10:41:18 +08:00
bdp = fec_enet_get_prevdesc ( bdp , fep ) ;
2009-04-15 01:32:24 +00:00
bdp - > cbd_sc | = BD_SC_WRAP ;
bdp = fep - > tx_bd_base ;
2013-09-03 10:41:18 +08:00
for ( i = 0 ; i < fep - > tx_ring_size ; i + + ) {
2009-04-15 01:32:24 +00:00
fep - > tx_bounce [ i ] = kmalloc ( FEC_ENET_TX_FRSIZE , GFP_KERNEL ) ;
bdp - > cbd_sc = 0 ;
bdp - > cbd_bufaddr = 0 ;
2012-10-30 18:25:31 +00:00
2013-01-03 16:04:23 +00:00
if ( fep - > bufdesc_ex ) {
struct bufdesc_ex * ebdp = ( struct bufdesc_ex * ) bdp ;
2013-03-26 05:25:07 +00:00
ebdp - > cbd_esc = BD_ENET_TX_INT ;
2013-01-03 16:04:23 +00:00
}
2013-09-03 10:41:18 +08:00
bdp = fec_enet_get_nextdesc ( bdp , fep ) ;
2009-04-15 01:32:24 +00:00
}
/* Set the last buffer to wrap. */
2013-09-03 10:41:18 +08:00
bdp = fec_enet_get_prevdesc ( bdp , fep ) ;
2009-04-15 01:32:24 +00:00
bdp - > cbd_sc | = BD_SC_WRAP ;
return 0 ;
}
2005-04-16 15:20:36 -07:00
static int
2011-01-19 11:58:12 +01:00
fec_enet_open ( struct net_device * ndev )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2009-04-15 01:32:24 +00:00
int ret ;
2005-04-16 15:20:36 -07:00
2014-05-20 13:22:51 +08:00
ret = fec_enet_clk_enable ( ndev , true ) ;
if ( ret )
return ret ;
2005-04-16 15:20:36 -07:00
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that .
*/
2011-01-19 11:58:12 +01:00
ret = fec_enet_alloc_buffers ( ndev ) ;
2009-04-15 01:32:24 +00:00
if ( ret )
return ret ;
2010-05-28 03:40:39 -07:00
/* Probe for and connect to the PHY when opening the interface */
2011-01-19 11:58:12 +01:00
ret = fec_enet_mii_probe ( ndev ) ;
2010-05-28 03:40:39 -07:00
if ( ret ) {
2011-01-19 11:58:12 +01:00
fec_enet_free_buffers ( ndev ) ;
2010-05-28 03:40:39 -07:00
return ret ;
}
2014-02-18 12:55:42 +00:00
napi_enable ( & fep - > napi ) ;
2010-03-31 02:10:44 +00:00
phy_start ( fep - > phy_dev ) ;
2011-01-19 11:58:12 +01:00
netif_start_queue ( ndev ) ;
2005-04-16 15:20:36 -07:00
fep - > opened = 1 ;
2009-04-15 01:32:18 +00:00
return 0 ;
2005-04-16 15:20:36 -07:00
}
static int
2011-01-19 11:58:12 +01:00
fec_enet_close ( struct net_device * ndev )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2005-04-16 15:20:36 -07:00
2009-04-15 01:32:18 +00:00
/* Don't know what to do yet. */
2013-03-14 06:54:09 +00:00
napi_disable ( & fep - > napi ) ;
2005-04-16 15:20:36 -07:00
fep - > opened = 0 ;
2011-01-19 11:58:12 +01:00
netif_stop_queue ( ndev ) ;
fec_stop ( ndev ) ;
2005-04-16 15:20:36 -07:00
2011-01-17 20:04:23 +01:00
if ( fep - > phy_dev ) {
phy_stop ( fep - > phy_dev ) ;
2010-05-28 03:40:39 -07:00
phy_disconnect ( fep - > phy_dev ) ;
2011-01-17 20:04:23 +01:00
}
2010-05-28 03:40:39 -07:00
2014-05-20 13:22:51 +08:00
fec_enet_clk_enable ( ndev , false ) ;
2011-01-19 20:26:39 +01:00
fec_enet_free_buffers ( ndev ) ;
2009-04-15 01:32:24 +00:00
2005-04-16 15:20:36 -07:00
return 0 ;
}
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */
# define HASH_BITS 6 /* #bits in hash */
# define CRC32_POLY 0xEDB88320
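/*
 * Illustrative sketch (not part of the driver): the loop in
 * set_multicast_list() below reduces each multicast MAC address to a 6-bit
 * hash that selects one bit in the two 32-bit group hash registers.  The
 * standalone program below reproduces that reduction for one sample
 * address; 01:00:5e:00:00:01 is just an assumed example value.
 */
#include <stdio.h>

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int crc = 0xffffffff;
	unsigned int i, bit, data, hash;

	/* Same bit-serial CRC-32 (polynomial CRC32_POLY) as the driver uses */
	for (i = 0; i < 6; i++) {
		data = mac[i];
		for (bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 0xEDB88320 : 0);
	}

	/* Keep only the upper HASH_BITS (6) bits of the CRC */
	hash = (crc >> (32 - 6)) & 0x3f;

	printf("hash=%u -> bit %u of GRP_HASH_TABLE_%s\n",
	       hash, hash > 31 ? hash - 32 : hash, hash > 31 ? "HIGH" : "LOW");
	return 0;
}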
2011-01-19 11:58:12 +01:00
static void set_multicast_list ( struct net_device * ndev )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2010-04-01 21:22:57 +00:00
struct netdev_hw_addr * ha ;
2010-02-22 09:22:26 +00:00
unsigned int i , bit , data , crc , tmp ;
2005-04-16 15:20:36 -07:00
unsigned char hash ;
2011-01-19 11:58:12 +01:00
if ( ndev - > flags & IFF_PROMISC ) {
2009-04-15 03:11:30 +00:00
tmp = readl ( fep - > hwp + FEC_R_CNTRL ) ;
tmp | = 0x8 ;
writel ( tmp , fep - > hwp + FEC_R_CNTRL ) ;
2009-04-15 01:32:19 +00:00
return ;
}
2005-04-16 15:20:36 -07:00
2009-04-15 01:32:19 +00:00
tmp = readl ( fep - > hwp + FEC_R_CNTRL ) ;
tmp & = ~ 0x8 ;
writel ( tmp , fep - > hwp + FEC_R_CNTRL ) ;
2011-01-19 11:58:12 +01:00
if ( ndev - > flags & IFF_ALLMULTI ) {
2009-04-15 01:32:19 +00:00
/* Catch all multicast addresses, so set the
 * filter to all 1's
*/
writel ( 0xffffffff , fep - > hwp + FEC_GRP_HASH_TABLE_HIGH ) ;
writel ( 0xffffffff , fep - > hwp + FEC_GRP_HASH_TABLE_LOW ) ;
return ;
}
/* Clear the filter and add the addresses to the hash registers
*/
writel ( 0 , fep - > hwp + FEC_GRP_HASH_TABLE_HIGH ) ;
writel ( 0 , fep - > hwp + FEC_GRP_HASH_TABLE_LOW ) ;
2011-01-19 11:58:12 +01:00
netdev_for_each_mc_addr ( ha , ndev ) {
2009-04-15 01:32:19 +00:00
/* calculate crc32 value of mac address */
crc = 0xffffffff ;
2011-01-19 11:58:12 +01:00
for ( i = 0 ; i < ndev - > addr_len ; i + + ) {
2010-04-01 21:22:57 +00:00
data = ha - > addr [ i ] ;
2009-04-15 01:32:19 +00:00
for ( bit = 0 ; bit < 8 ; bit + + , data > > = 1 ) {
crc = ( crc > > 1 ) ^
( ( ( crc ^ data ) & 1 ) ? CRC32_POLY : 0 ) ;
2005-04-16 15:20:36 -07:00
}
}
2009-04-15 01:32:19 +00:00
/* Only the upper 6 bits (HASH_BITS) are used;
 * they select a specific bit in the hash registers.
 */
hash = ( crc > > ( 32 - HASH_BITS ) ) & 0x3f ;
if ( hash > 31 ) {
tmp = readl ( fep - > hwp + FEC_GRP_HASH_TABLE_HIGH ) ;
tmp | = 1 < < ( hash - 32 ) ;
writel ( tmp , fep - > hwp + FEC_GRP_HASH_TABLE_HIGH ) ;
} else {
tmp = readl ( fep - > hwp + FEC_GRP_HASH_TABLE_LOW ) ;
tmp | = 1 < < hash ;
writel ( tmp , fep - > hwp + FEC_GRP_HASH_TABLE_LOW ) ;
}
2005-04-16 15:20:36 -07:00
}
}
2009-04-15 01:32:18 +00:00
/* Set a MAC change in hardware. */
2009-04-15 01:32:23 +00:00
static int
2011-01-19 11:58:12 +01:00
fec_set_mac_address ( struct net_device * ndev , void * p )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2009-04-15 01:32:23 +00:00
struct sockaddr * addr = p ;
2014-03-30 21:32:08 +02:00
if ( addr ) {
if ( ! is_valid_ether_addr ( addr - > sa_data ) )
return - EADDRNOTAVAIL ;
memcpy ( ndev - > dev_addr , addr - > sa_data , ndev - > addr_len ) ;
}
2005-04-16 15:20:36 -07:00
2011-01-19 11:58:12 +01:00
writel ( ndev - > dev_addr [ 3 ] | ( ndev - > dev_addr [ 2 ] < < 8 ) |
( ndev - > dev_addr [ 1 ] < < 16 ) | ( ndev - > dev_addr [ 0 ] < < 24 ) ,
2009-04-15 03:11:30 +00:00
fep - > hwp + FEC_ADDR_LOW ) ;
2011-01-19 11:58:12 +01:00
writel ( ( ndev - > dev_addr [ 5 ] < < 16 ) | ( ndev - > dev_addr [ 4 ] < < 24 ) ,
2010-05-05 00:55:48 -07:00
fep - > hwp + FEC_ADDR_HIGH ) ;
2009-04-15 01:32:23 +00:00
return 0 ;
2005-04-16 15:20:36 -07:00
}
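/*
 * Illustrative sketch (not part of the driver): fec_set_mac_address() above
 * packs the six address bytes into the two MAC address registers with the
 * most significant byte in the highest-order bits.  The standalone program
 * below shows the resulting register values; 00:04:9f:01:02:03 is just an
 * assumed example address.
 */
#include <stdio.h>

int main(void)
{
	const unsigned char a[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	unsigned int addr_low, addr_high;

	/* FEC_ADDR_LOW holds bytes 0..3 of the MAC address */
	addr_low = a[3] | (a[2] << 8) | (a[1] << 16) | ((unsigned int)a[0] << 24);
	/* FEC_ADDR_HIGH holds bytes 4..5 in its upper half */
	addr_high = (a[5] << 16) | ((unsigned int)a[4] << 24);

	printf("FEC_ADDR_LOW=0x%08x FEC_ADDR_HIGH=0x%08x\n", addr_low, addr_high);
	return 0;
}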
2011-09-29 02:15:57 +00:00
# ifdef CONFIG_NET_POLL_CONTROLLER
2012-07-10 10:56:00 +00:00
/**
* fec_poll_controller - FEC Poll controller function
2011-09-29 02:15:57 +00:00
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non-interrupt mode
*
*/
2013-03-20 05:06:11 +00:00
static void fec_poll_controller ( struct net_device * dev )
2011-09-29 02:15:57 +00:00
{
int i ;
struct fec_enet_private * fep = netdev_priv ( dev ) ;
for ( i = 0 ; i < FEC_IRQ_NUM ; i + + ) {
if ( fep - > irq [ i ] > 0 ) {
disable_irq ( fep - > irq [ i ] ) ;
fec_enet_interrupt ( fep - > irq [ i ] , dev ) ;
enable_irq ( fep - > irq [ i ] ) ;
}
}
}
# endif
2013-04-19 08:10:49 +00:00
static int fec_set_features ( struct net_device * netdev ,
netdev_features_t features )
{
struct fec_enet_private * fep = netdev_priv ( netdev ) ;
netdev_features_t changed = features ^ netdev - > features ;
netdev - > features = features ;
/* Receive checksum has been changed */
if ( changed & NETIF_F_RXCSUM ) {
if ( features & NETIF_F_RXCSUM )
fep - > csum_flags | = FLAG_RX_CSUM_ENABLED ;
else
fep - > csum_flags & = ~ FLAG_RX_CSUM_ENABLED ;
if ( netif_running ( netdev ) ) {
fec_stop ( netdev ) ;
fec_restart ( netdev , fep - > phy_dev - > duplex ) ;
netif_wake_queue ( netdev ) ;
} else {
fec_restart ( netdev , fep - > phy_dev - > duplex ) ;
}
}
return 0 ;
}
2009-04-15 01:32:23 +00:00
static const struct net_device_ops fec_netdev_ops = {
. ndo_open = fec_enet_open ,
. ndo_stop = fec_enet_close ,
. ndo_start_xmit = fec_enet_start_xmit ,
2011-08-16 06:29:01 +00:00
. ndo_set_rx_mode = set_multicast_list ,
2009-07-09 17:59:01 +00:00
. ndo_change_mtu = eth_change_mtu ,
2009-04-15 01:32:23 +00:00
. ndo_validate_addr = eth_validate_addr ,
. ndo_tx_timeout = fec_timeout ,
. ndo_set_mac_address = fec_set_mac_address ,
2011-01-19 20:26:39 +01:00
. ndo_do_ioctl = fec_enet_ioctl ,
2011-09-29 02:15:57 +00:00
# ifdef CONFIG_NET_POLL_CONTROLLER
. ndo_poll_controller = fec_poll_controller ,
# endif
2013-04-19 08:10:49 +00:00
. ndo_set_features = fec_set_features ,
2009-04-15 01:32:23 +00:00
} ;
2005-04-16 15:20:36 -07:00
/*
 * XXX: We need to clean up on failure exits here.
2009-01-28 23:03:11 +00:00
*
2005-04-16 15:20:36 -07:00
*/
2011-01-19 11:58:12 +01:00
static int fec_enet_init ( struct net_device * ndev )
2005-04-16 15:20:36 -07:00
{
2011-01-19 11:58:12 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2013-05-08 21:08:22 +00:00
const struct platform_device_id * id_entry =
platform_get_device_id ( fep - > pdev ) ;
2009-04-15 01:32:24 +00:00
struct bufdesc * cbd_base ;
2005-04-16 15:20:36 -07:00
2009-04-15 01:32:17 +00:00
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent ( NULL , PAGE_SIZE , & fep - > bd_dma ,
2013-03-14 13:07:21 +00:00
GFP_KERNEL ) ;
if ( ! cbd_base )
2005-11-07 14:09:50 +10:00
return - ENOMEM ;
2013-03-26 16:12:03 +00:00
memset ( cbd_base , 0 , PAGE_SIZE ) ;
2008-05-01 14:08:12 +10:00
2011-01-19 11:58:12 +01:00
fep - > netdev = ndev ;
2005-04-16 15:20:36 -07:00
2011-01-05 21:13:11 +00:00
/* Get the Ethernet address */
2011-01-19 11:58:12 +01:00
fec_get_mac ( ndev ) ;
2014-03-30 21:32:08 +02:00
/* make sure the MAC address we just acquired is programmed into the hw */
fec_set_mac_address ( ndev , NULL ) ;
2005-04-16 15:20:36 -07:00
2013-09-03 10:41:18 +08:00
/* init the tx & rx ring size */
fep - > tx_ring_size = TX_RING_SIZE ;
fep - > rx_ring_size = RX_RING_SIZE ;
2009-04-15 01:32:17 +00:00
/* Set receive and transmit descriptor base. */
2005-04-16 15:20:36 -07:00
fep - > rx_bd_base = cbd_base ;
2013-01-03 16:04:23 +00:00
if ( fep - > bufdesc_ex )
fep - > tx_bd_base = ( struct bufdesc * )
2013-09-03 10:41:18 +08:00
( ( ( struct bufdesc_ex * ) cbd_base ) + fep - > rx_ring_size ) ;
2013-01-03 16:04:23 +00:00
else
2013-09-03 10:41:18 +08:00
fep - > tx_bd_base = cbd_base + fep - > rx_ring_size ;
2005-04-16 15:20:36 -07:00
2009-04-15 01:32:18 +00:00
/* The FEC Ethernet specific entries in the device structure */
2011-01-19 11:58:12 +01:00
ndev - > watchdog_timeo = TX_TIMEOUT ;
ndev - > netdev_ops = & fec_netdev_ops ;
ndev - > ethtool_ops = & fec_enet_ethtool_ops ;
2010-02-05 08:56:20 +00:00
2013-01-28 18:31:42 +00:00
writel ( FEC_RX_DISABLED_IMASK , fep - > hwp + FEC_IMASK ) ;
2013-08-27 17:35:08 -03:00
netif_napi_add ( ndev , & fep - > napi , fec_enet_rx_napi , NAPI_POLL_WEIGHT ) ;
2013-01-28 18:31:42 +00:00
2013-07-02 22:52:56 +01:00
if ( id_entry - > driver_data & FEC_QUIRK_HAS_VLAN ) {
/* enable hw VLAN support */
ndev - > features | = NETIF_F_HW_VLAN_CTAG_RX ;
ndev - > hw_features | = NETIF_F_HW_VLAN_CTAG_RX ;
}
2013-05-08 21:08:22 +00:00
if ( id_entry - > driver_data & FEC_QUIRK_HAS_CSUM ) {
/* enable hw accelerator */
ndev - > features | = ( NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM ) ;
ndev - > hw_features | = ( NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM ) ;
fep - > csum_flags | = FLAG_RX_CSUM_ENABLED ;
}
2013-04-19 08:10:49 +00:00
2011-01-19 11:58:12 +01:00
fec_restart ( ndev , 0 ) ;
2005-04-16 15:20:36 -07:00
return 0 ;
}
2011-06-25 02:04:35 +08:00
# ifdef CONFIG_OF
2012-12-03 09:23:58 -05:00
static void fec_reset_phy ( struct platform_device * pdev )
2011-06-25 02:04:35 +08:00
{
int err , phy_reset ;
2012-06-27 03:45:24 +00:00
int msec = 1 ;
2011-06-25 02:04:35 +08:00
struct device_node * np = pdev - > dev . of_node ;
if ( ! np )
2011-09-23 02:12:46 +00:00
return ;
2011-06-25 02:04:35 +08:00
2012-06-27 03:45:24 +00:00
of_property_read_u32 ( np , " phy-reset-duration " , & msec ) ;
/* A sane reset duration should not be longer than 1s */
if ( msec > 1000 )
msec = 1 ;
2011-06-25 02:04:35 +08:00
phy_reset = of_get_named_gpio ( np , " phy-reset-gpios " , 0 ) ;
2013-02-18 10:20:31 +00:00
if ( ! gpio_is_valid ( phy_reset ) )
return ;
2012-06-27 03:45:22 +00:00
err = devm_gpio_request_one ( & pdev - > dev , phy_reset ,
GPIOF_OUT_INIT_LOW , " phy-reset " ) ;
2011-06-25 02:04:35 +08:00
if ( err ) {
2013-02-18 10:20:31 +00:00
dev_err ( & pdev - > dev , " failed to get phy-reset-gpios: %d \n " , err ) ;
2011-09-23 02:12:46 +00:00
return ;
2011-06-25 02:04:35 +08:00
}
2012-06-27 03:45:24 +00:00
msleep ( msec ) ;
2011-06-25 02:04:35 +08:00
gpio_set_value ( phy_reset , 1 ) ;
}
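/*
 * For illustration only: a hypothetical board devicetree fragment that would
 * exercise the two optional properties read above.  The GPIO specifier and
 * the 10 ms duration are assumed example values, not taken from any real
 * board file.
 *
 *	&fec {
 *		phy-reset-gpios = <&gpio1 2 0>;
 *		phy-reset-duration = <10>;	// milliseconds
 *	};
 */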
# else /* CONFIG_OF */
2013-01-07 17:42:56 +00:00
static void fec_reset_phy ( struct platform_device * pdev )
2011-06-25 02:04:35 +08:00
{
/*
 * In the case of a platform probe, the reset has already been done
 * by the machine code.
*/
}
# endif /* CONFIG_OF */
2012-12-03 09:23:58 -05:00
static int
2009-01-28 23:03:11 +00:00
fec_probe ( struct platform_device * pdev )
{
struct fec_enet_private * fep ;
2010-05-24 00:36:13 -07:00
struct fec_platform_data * pdata ;
2009-01-28 23:03:11 +00:00
struct net_device * ndev ;
int i , irq , ret = 0 ;
struct resource * r ;
2011-06-25 02:04:35 +08:00
const struct of_device_id * of_id ;
2011-12-05 05:01:15 +00:00
static int dev_id ;
2011-06-25 02:04:35 +08:00
of_id = of_match_device ( fec_dt_ids , & pdev - > dev ) ;
if ( of_id )
pdev - > id_entry = of_id - > data ;
2009-01-28 23:03:11 +00:00
/* Init network device */
ndev = alloc_etherdev ( sizeof ( struct fec_enet_private ) ) ;
2013-03-11 07:32:55 +00:00
if ( ! ndev )
return - ENOMEM ;
2009-01-28 23:03:11 +00:00
SET_NETDEV_DEV ( ndev , & pdev - > dev ) ;
/* setup board info structure */
fep = netdev_priv ( ndev ) ;
2013-06-18 10:04:59 -07:00
# if !defined(CONFIG_M5272)
2013-01-16 16:55:58 +00:00
/* Enable pause frame autonegotiation by default */
if ( pdev - > id_entry & &
( pdev - > id_entry - > driver_data & FEC_QUIRK_HAS_GBIT ) )
fep - > pause_flag | = FEC_PAUSE_FLAG_AUTONEG ;
2013-06-18 10:04:59 -07:00
# endif
2013-01-16 16:55:58 +00:00
2013-07-21 13:25:03 -03:00
r = platform_get_resource ( pdev , IORESOURCE_MEM , 0 ) ;
2013-06-10 17:05:05 +05:30
fep - > hwp = devm_ioremap_resource ( & pdev - > dev , r ) ;
if ( IS_ERR ( fep - > hwp ) ) {
ret = PTR_ERR ( fep - > hwp ) ;
goto failed_ioremap ;
}
2010-03-31 02:10:44 +00:00
fep - > pdev = pdev ;
2011-12-05 05:01:15 +00:00
fep - > dev_id = dev_id + + ;
2009-01-28 23:03:11 +00:00
2013-01-03 16:04:23 +00:00
fep - > bufdesc_ex = 0 ;
2009-01-28 23:03:11 +00:00
platform_set_drvdata ( pdev , ndev ) ;
2013-04-02 09:35:10 +00:00
ret = of_get_phy_mode ( pdev - > dev . of_node ) ;
2011-06-25 02:04:35 +08:00
if ( ret < 0 ) {
2013-08-30 13:56:26 +09:00
pdata = dev_get_platdata ( & pdev - > dev ) ;
2011-06-25 02:04:35 +08:00
if ( pdata )
fep - > phy_interface = pdata - > phy ;
else
fep - > phy_interface = PHY_INTERFACE_MODE_MII ;
} else {
fep - > phy_interface = ret ;
}
2012-03-07 09:30:49 +01:00
fep - > clk_ipg = devm_clk_get ( & pdev - > dev , " ipg " ) ;
if ( IS_ERR ( fep - > clk_ipg ) ) {
ret = PTR_ERR ( fep - > clk_ipg ) ;
2009-01-28 23:03:11 +00:00
goto failed_clk ;
}
2012-03-07 09:30:49 +01:00
fep - > clk_ahb = devm_clk_get ( & pdev - > dev , " ahb " ) ;
if ( IS_ERR ( fep - > clk_ahb ) ) {
ret = PTR_ERR ( fep - > clk_ahb ) ;
goto failed_clk ;
}
2013-01-29 15:46:11 +01:00
/* enet_out is optional, depends on board */
fep - > clk_enet_out = devm_clk_get ( & pdev - > dev , " enet_out " ) ;
if ( IS_ERR ( fep - > clk_enet_out ) )
fep - > clk_enet_out = NULL ;
2012-10-30 18:25:31 +00:00
fep - > clk_ptp = devm_clk_get ( & pdev - > dev , " ptp " ) ;
2013-02-22 06:40:45 +00:00
fep - > bufdesc_ex =
pdev - > id_entry - > driver_data & FEC_QUIRK_HAS_BUFDESC_EX ;
2012-10-30 18:25:31 +00:00
if ( IS_ERR ( fep - > clk_ptp ) ) {
2013-03-18 15:33:39 +08:00
fep - > clk_ptp = NULL ;
2013-01-03 16:04:23 +00:00
fep - > bufdesc_ex = 0 ;
2012-10-30 18:25:31 +00:00
}
2014-05-20 13:22:51 +08:00
ret = fec_enet_clk_enable ( ndev , true ) ;
2013-07-21 13:25:02 -03:00
if ( ret )
goto failed_clk ;
2013-05-27 03:48:29 +00:00
fep - > reg_phy = devm_regulator_get ( & pdev - > dev , " phy " ) ;
if ( ! IS_ERR ( fep - > reg_phy ) ) {
ret = regulator_enable ( fep - > reg_phy ) ;
2012-06-27 03:45:21 +00:00
if (ret) {
	dev_err(&pdev->dev,
		"Failed to enable phy regulator: %d\n", ret);
	goto failed_regulator;
}
2013-05-27 03:48:31 +00:00
} else {
fep - > reg_phy = NULL ;
2012-06-27 03:45:21 +00:00
}
2012-06-27 03:45:20 +00:00
fec_reset_phy ( pdev ) ;
2013-02-22 06:40:45 +00:00
if ( fep - > bufdesc_ex )
2013-06-07 10:48:00 +00:00
fec_ptp_init ( pdev ) ;
2013-02-22 06:40:45 +00:00
ret = fec_enet_init ( ndev ) ;
if ( ret )
goto failed_init ;
for ( i = 0 ; i < FEC_IRQ_NUM ; i + + ) {
irq = platform_get_irq ( pdev , i ) ;
if ( irq < 0 ) {
if ( i )
break ;
ret = irq ;
goto failed_irq ;
}
2013-07-21 13:25:04 -03:00
ret = devm_request_irq ( & pdev - > dev , irq , fec_enet_interrupt ,
2013-09-13 05:44:38 +02:00
0 , pdev - > name , ndev ) ;
2013-07-21 13:25:04 -03:00
if ( ret )
2013-02-22 06:40:45 +00:00
goto failed_irq ;
}
2010-03-31 02:10:44 +00:00
ret = fec_enet_mii_init ( pdev ) ;
if ( ret )
goto failed_mii_init ;
2010-10-07 02:30:30 +00:00
/* Carrier starts down, phylib will bring it up */
netif_carrier_off ( ndev ) ;
2014-05-20 13:22:51 +08:00
fec_enet_clk_enable ( ndev , false ) ;
2010-10-07 02:30:30 +00:00
2009-01-28 23:03:11 +00:00
ret = register_netdev ( ndev ) ;
if ( ret )
goto failed_register ;
2013-04-13 07:25:36 +00:00
if (fep->bufdesc_ex && fep->ptp_clock)
	netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
2013-05-07 14:08:44 +00:00
INIT_DELAYED_WORK ( & ( fep - > delay_work . delay_work ) , fec_enet_work ) ;
2009-01-28 23:03:11 +00:00
return 0 ;
failed_register :
2010-03-31 02:10:44 +00:00
fec_enet_mii_remove ( fep ) ;
failed_mii_init :
2013-05-27 03:48:30 +00:00
failed_irq :
failed_init :
2013-05-27 03:48:31 +00:00
if ( fep - > reg_phy )
regulator_disable ( fep - > reg_phy ) ;
2012-06-27 03:45:21 +00:00
failed_regulator :
2014-05-20 13:22:51 +08:00
fec_enet_clk_enable ( ndev , false ) ;
2009-01-28 23:03:11 +00:00
failed_clk :
failed_ioremap :
free_netdev ( ndev ) ;
return ret ;
}
2012-12-03 09:23:58 -05:00
static int
2009-01-28 23:03:11 +00:00
fec_drv_remove ( struct platform_device * pdev )
{
struct net_device * ndev = platform_get_drvdata ( pdev ) ;
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2013-05-07 14:08:44 +00:00
cancel_delayed_work_sync ( & ( fep - > delay_work . delay_work ) ) ;
2011-12-07 21:59:31 +00:00
unregister_netdev ( ndev ) ;
2010-03-31 02:10:44 +00:00
fec_enet_mii_remove ( fep ) ;
2012-10-30 18:25:31 +00:00
del_timer_sync ( & fep - > time_keep ) ;
2013-05-27 03:48:31 +00:00
if ( fep - > reg_phy )
regulator_disable ( fep - > reg_phy ) ;
2012-10-30 18:25:31 +00:00
if ( fep - > ptp_clock )
ptp_clock_unregister ( fep - > ptp_clock ) ;
2014-05-20 13:22:51 +08:00
fec_enet_clk_enable ( ndev , false ) ;
2009-01-28 23:03:11 +00:00
free_netdev ( ndev ) ;
2011-01-13 21:44:18 +01:00
2009-01-28 23:03:11 +00:00
return 0 ;
}
2013-04-16 08:17:46 +00:00
# ifdef CONFIG_PM_SLEEP
2009-01-28 23:03:11 +00:00
static int
2010-06-18 04:19:54 +00:00
fec_suspend ( struct device * dev )
2009-01-28 23:03:11 +00:00
{
2010-06-18 04:19:54 +00:00
struct net_device * ndev = dev_get_drvdata ( dev ) ;
2011-01-13 21:53:40 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2009-01-28 23:03:11 +00:00
2011-01-13 21:53:40 +01:00
if ( netif_running ( ndev ) ) {
fec_stop ( ndev ) ;
netif_device_detach ( ndev ) ;
2009-01-28 23:03:11 +00:00
}
2014-05-20 13:22:51 +08:00
fec_enet_clk_enable ( ndev , false ) ;
2011-01-13 21:53:40 +01:00
2013-05-27 03:48:33 +00:00
if ( fep - > reg_phy )
regulator_disable ( fep - > reg_phy ) ;
2009-01-28 23:03:11 +00:00
return 0 ;
}
static int
2010-06-18 04:19:54 +00:00
fec_resume ( struct device * dev )
2009-01-28 23:03:11 +00:00
{
2010-06-18 04:19:54 +00:00
struct net_device * ndev = dev_get_drvdata ( dev ) ;
2011-01-13 21:53:40 +01:00
struct fec_enet_private * fep = netdev_priv ( ndev ) ;
2013-05-27 03:48:33 +00:00
int ret ;
if ( fep - > reg_phy ) {
ret = regulator_enable ( fep - > reg_phy ) ;
if ( ret )
return ret ;
}
2009-01-28 23:03:11 +00:00
2014-05-20 13:22:51 +08:00
ret = fec_enet_clk_enable ( ndev , true ) ;
2013-07-21 13:25:02 -03:00
if ( ret )
2014-05-20 13:22:51 +08:00
goto failed_clk ;
2013-07-21 13:25:02 -03:00
2011-01-13 21:53:40 +01:00
if ( netif_running ( ndev ) ) {
fec_restart ( ndev , fep - > full_duplex ) ;
netif_device_attach ( ndev ) ;
2009-01-28 23:03:11 +00:00
}
2011-01-13 21:53:40 +01:00
2009-01-28 23:03:11 +00:00
return 0 ;
2013-07-21 13:25:02 -03:00
2014-05-20 13:22:51 +08:00
failed_clk :
2013-07-21 13:25:02 -03:00
if ( fep - > reg_phy )
regulator_disable ( fep - > reg_phy ) ;
return ret ;
2009-01-28 23:03:11 +00:00
}
2013-04-16 08:17:46 +00:00
# endif /* CONFIG_PM_SLEEP */
2009-01-28 23:03:11 +00:00
2013-04-16 08:17:46 +00:00
static SIMPLE_DEV_PM_OPS ( fec_pm_ops , fec_suspend , fec_resume ) ;
2010-06-02 09:27:04 +00:00
2009-01-28 23:03:11 +00:00
static struct platform_driver fec_driver = {
. driver = {
2011-01-05 21:13:13 +00:00
. name = DRIVER_NAME ,
2010-06-18 04:19:54 +00:00
. owner = THIS_MODULE ,
. pm = & fec_pm_ops ,
2011-06-25 02:04:35 +08:00
. of_match_table = fec_dt_ids ,
2009-01-28 23:03:11 +00:00
} ,
2011-01-05 21:13:13 +00:00
. id_table = fec_devtype ,
2010-06-18 04:19:54 +00:00
. probe = fec_probe ,
2012-12-03 09:23:58 -05:00
. remove = fec_drv_remove ,
2009-01-28 23:03:11 +00:00
} ;
2012-01-23 16:44:50 +00:00
module_platform_driver ( fec_driver ) ;
2005-04-16 15:20:36 -07:00
2013-07-20 16:20:36 -03:00
MODULE_ALIAS("platform:" DRIVER_NAME);
2005-04-16 15:20:36 -07:00
MODULE_LICENSE("GPL");