From 9b5330edf1f8e24a8ca25412e6d341e0fae0ad0a Mon Sep 17 00:00:00 2001
From: Fugang Duan
Date: Sat, 13 Sep 2014 05:00:46 +0800
Subject: [PATCH 01/12] net: fec: add enet reference clock for i.MX 6SX chip

The i.MX6SX ENET has the following clocks available for user
configuration:

clk_ipg: ipg_clk_s, ipg_clk_mac0_s, 66MHz.
clk_ahb: ENET system clock, which is the ENET AXI clock on i.MX6SX.
	 On i.MX6SX it is also the clock source for interrupt
	 coalescing. Clock range: 200MHz ~ 266MHz.
clk_ref: reference clock for tx and rx. In i.MX6SX ENET RGMII mode,
	 the reference clock is 125MHz, supplied by the internal PLL
	 or externally. On the i.MX6SX-arm2 board the clock comes
	 from the internal PLL. clk_ref is optional and depends on
	 the board.
clk_enet_out: this clock can be output from the internal PLL to
	 supply a 50MHz clock to the PHY. clk_enet_out is optional
	 and depends on the chip and board.
clk_ptp: 1588 timestamp clock. It is optional and depends on the chip.

This patch adds clk_ref to distinguish the different clocks.

Signed-off-by: Fugang Duan
Signed-off-by: Frank Li
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/freescale/fec.h      |  1 +
 drivers/net/ethernet/freescale/fec_main.c | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ee41d98b44b6..635772bbfb73 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -272,6 +272,7 @@ struct fec_enet_private {
 	struct clk *clk_ipg;
 	struct clk *clk_ahb;
+	struct clk *clk_ref;
 	struct clk *clk_enet_out;
 	struct clk *clk_ptp;

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 89355a719625..c21ecff17608 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1621,6 +1621,11 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 			}
 			mutex_unlock(&fep->ptp_clk_mutex);
 		}
+		if (fep->clk_ref) {
+			ret = clk_prepare_enable(fep->clk_ref);
+			if (ret)
+				goto failed_clk_ref;
+		}
 	} else {
 		clk_disable_unprepare(fep->clk_ahb);
 		clk_disable_unprepare(fep->clk_ipg);
@@ -1632,9 +1637,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 			fep->ptp_clk_on = false;
 			mutex_unlock(&fep->ptp_clk_mutex);
 		}
+		if (fep->clk_ref)
+			clk_disable_unprepare(fep->clk_ref);
 	}

 	return 0;
+
+failed_clk_ref:
+	if (fep->clk_ref)
+		clk_disable_unprepare(fep->clk_ref);
 failed_clk_ptp:
 	if (fep->clk_enet_out)
 		clk_disable_unprepare(fep->clk_enet_out);
@@ -2637,6 +2648,12 @@ fec_probe(struct platform_device *pdev)
 	fep->ptp_clk_on = false;
 	mutex_init(&fep->ptp_clk_mutex);
+
+	/* clk_ref is optional, depends on board */
+	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
+	if (IS_ERR(fep->clk_ref))
+		fep->clk_ref = NULL;
+
 	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
 	fep->bufdesc_ex =
 		pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
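Background on the pattern used above: in the common clock framework a
NULL clk is treated as a no-op by clk_prepare_enable() and
clk_disable_unprepare(), which is why the driver can simply map a
failed devm_clk_get() to NULL for an optional clock. A minimal sketch
of the same pattern, not the exact driver code ("enet_clk_ref" is the
clock-name string this patch introduces):

	struct clk *clk_ref;
	int ret;

	/* optional clock: absent on some boards, so do not fail probe */
	clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(clk_ref))
		clk_ref = NULL;

	/* enable only when present; clk_prepare_enable(NULL) would
	 * also succeed as a no-op, so the check is belt-and-braces
	 */
	if (clk_ref) {
		ret = clk_prepare_enable(clk_ref);
		if (ret)
			return ret;
	}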
From 95a774706d15fdbaef19656b8aae18a985465729 Mon Sep 17 00:00:00 2001
From: Fugang Duan
Date: Sat, 13 Sep 2014 05:00:47 +0800
Subject: [PATCH 02/12] net: fec: add enet AVB feature macro define for imx6sx

Add the ENET AVB feature macro define for i.MX6SX.

Signed-off-by: Fugang Duan
Signed-off-by: Frank Li
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/freescale/fec_main.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c21ecff17608..ee9f04ff18cb 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -104,6 +104,16 @@ static void set_multicast_list(struct net_device *ndev);
  * ENET_TDAR[TDAR].
  */
 #define FEC_QUIRK_ERR006358		(1 << 7)
+/* ENET IP hw AVB
+ *
+ * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
+ * - Two class indicators on receive with configurable priority
+ * - Two class indicators and line speed timer on transmit allowing
+ *   implementation class credit based shapers externally
+ * - Additional DMA registers provisioned to allow managing up to 3
+ *   independent rings
+ */
+#define FEC_QUIRK_HAS_AVB		(1 << 8)

 static struct platform_device_id fec_devtype[] = {
 	{
@@ -127,6 +137,12 @@ static struct platform_device_id fec_devtype[] = {
 	}, {
 		.name = "mvf600-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC,
+	}, {
+		.name = "imx6sx-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+				FEC_QUIRK_HAS_AVB,
 	}, {
 		/* sentinel */
 	}
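These quirk bits are ORed into the per-platform driver_data and tested
at runtime, so one driver binary serves every FEC/ENET variant. A
sketch of the usual test, the same idiom later patches in this series
use to gate AVB-specific paths:

	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);

	if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
		/* AVB-capable ENET: extra rings, no full MAC reset, ... */
	}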
From 4d494cdc92b3b9a0f5fb9e1560810fa27d5a0489 Mon Sep 17 00:00:00 2001
From: Fugang Duan
Date: Sat, 13 Sep 2014 05:00:48 +0800
Subject: [PATCH 03/12] net: fec: change data structure to support multiqueue

This patch only changes the data structures to support multiple
queues; still just one queue is enabled.

The Ethernet multiqueue mechanism can improve performance on SMP
systems: with a single hw queue, multiqueue balances the CPU load,
and with multiple hw queues, multiple cores can process network
packets in parallel. Refer to this article for a detailed discussion
of the advantages of multiqueue:
http://vger.kernel.org/~davem/davem_nyc09.pdf

Signed-off-by: Fugang Duan
Signed-off-by: Frank Li
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/freescale/fec.h      | 115 ++++--
 drivers/net/ethernet/freescale/fec_main.c | 483 ++++++++++++++--------
 2 files changed, 401 insertions(+), 197 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 635772bbfb73..b2b91f8826a2 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -27,8 +27,8 @@
  */
 #define FEC_IEVENT		0x004 /* Interrupt event reg */
 #define FEC_IMASK		0x008 /* Interrupt mask reg */
-#define FEC_R_DES_ACTIVE	0x010 /* Receive descriptor reg */
-#define FEC_X_DES_ACTIVE	0x014 /* Transmit descriptor reg */
+#define FEC_R_DES_ACTIVE_0	0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE_0	0x014 /* Transmit descriptor reg */
 #define FEC_ECNTRL		0x024 /* Ethernet control reg */
 #define FEC_MII_DATA		0x040 /* MII manage frame reg */
 #define FEC_MII_SPEED		0x044 /* MII speed control reg */
@@ -45,14 +45,26 @@
 #define FEC_X_WMRK		0x144 /* FIFO transmit water mark */
 #define FEC_R_BOUND		0x14c /* FIFO receive bound reg */
 #define FEC_R_FSTART		0x150 /* FIFO receive start reg */
-#define FEC_R_DES_START		0x180 /* Receive descriptor ring */
-#define FEC_X_DES_START		0x184 /* Transmit descriptor ring */
+#define FEC_R_DES_START_1	0x160 /* Receive descriptor ring 1 */
+#define FEC_X_DES_START_1	0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_DES_START_2	0x16c /* Receive descriptor ring 2 */
+#define FEC_X_DES_START_2	0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_DES_START_0	0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START_0	0x184 /* Transmit descriptor ring */
 #define FEC_R_BUFF_SIZE		0x188 /* Maximum receive buff size */
 #define FEC_R_FIFO_RSFL		0x190 /* Receive FIFO section full threshold */
 #define FEC_R_FIFO_RSEM		0x194 /* Receive FIFO section empty threshold */
 #define FEC_R_FIFO_RAEM		0x198 /* Receive FIFO almost empty threshold */
 #define FEC_R_FIFO_RAFL		0x19c /* Receive FIFO almost full threshold */
 #define FEC_RACC		0x1C4 /* Receive Accelerator function */
+#define FEC_RCMR_1		0x1c8 /* Receive classification match ring 1 */
+#define FEC_RCMR_2		0x1cc /* Receive classification match ring 2 */
+#define FEC_DMA_CFG_1		0x1d8 /* DMA class configuration for ring 1 */
+#define FEC_DMA_CFG_2		0x1dc /* DMA class Configuration for ring 2 */
+#define FEC_R_DES_ACTIVE_1	0x1e0 /* Rx descriptor active for ring 1 */
+#define FEC_X_DES_ACTIVE_1	0x1e4 /* Tx descriptor active for ring 1 */
+#define FEC_R_DES_ACTIVE_2	0x1e8 /* Rx descriptor active for ring 2 */
+#define FEC_X_DES_ACTIVE_2	0x1ec /* Tx descriptor active for ring 2 */
 #define FEC_MIIGSK_CFGR		0x300 /* MIIGSK Configuration reg */
 #define FEC_MIIGSK_ENR		0x308 /* MIIGSK Enable reg */
@@ -233,6 +245,43 @@ struct bufdesc_ex {
 /* This device has up to three irqs on some platforms */
 #define FEC_IRQ_NUM	3

+/* Maximum number of queues supported
+ * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
+ * User can point the queue number that is less than or equal to 3.
+ */
+#define FEC_ENET_MAX_TX_QS	3
+#define FEC_ENET_MAX_RX_QS	3
+
+#define FEC_R_DES_START(X)	((X == 1) ? FEC_R_DES_START_1 : \
+				((X == 2) ? \
+					FEC_R_DES_START_2 : FEC_R_DES_START_0))
+#define FEC_X_DES_START(X)	((X == 1) ? FEC_X_DES_START_1 : \
+				((X == 2) ? \
+					FEC_X_DES_START_2 : FEC_X_DES_START_0))
+#define FEC_R_DES_ACTIVE(X)	((X == 1) ? FEC_R_DES_ACTIVE_1 : \
+				((X == 2) ? \
+					FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
+#define FEC_X_DES_ACTIVE(X)	((X == 1) ? FEC_X_DES_ACTIVE_1 : \
+				((X == 2) ? \
+					FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
+
+#define FEC_DMA_CFG(X)		((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+
+#define DMA_CLASS_EN		(1 << 16)
+#define FEC_RCMR(X)		((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK		0xFFFF
+#define IDLE_SLOPE_1		0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE_2		0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE(X)		((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+				(IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
+#define RCMR_MATCHEN		(0x1 << 16)
+#define RCMR_CMP_CFG(v, n)	((v & 0x7) << (n << 2))
+#define RCMR_CMP_1		(RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
+				RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
+#define RCMR_CMP_2		(RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
+				RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
+#define RCMR_CMP(X)		((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+
 /* The number of Tx and Rx buffers. These are allocated from the page
  * pool. The code may assume these are power of two, so it it best
  * to keep them that size.
@@ -256,6 +305,35 @@ struct bufdesc_ex {
 #define FLAG_RX_CSUM_ENABLED	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 #define FLAG_RX_CSUM_ERROR	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)

+struct fec_enet_priv_tx_q {
+	int index;
+	unsigned char *tx_bounce[TX_RING_SIZE];
+	struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+	dma_addr_t	bd_dma;
+	struct bufdesc	*tx_bd_base;
+	uint tx_ring_size;
+
+	unsigned short tx_stop_threshold;
+	unsigned short tx_wake_threshold;
+
+	struct bufdesc	*cur_tx;
+	struct bufdesc	*dirty_tx;
+	char *tso_hdrs;
+	dma_addr_t tso_hdrs_dma;
+};
+
+struct fec_enet_priv_rx_q {
+	int index;
+	struct sk_buff *rx_skbuff[RX_RING_SIZE];
+
+	dma_addr_t	bd_dma;
+	struct bufdesc	*rx_bd_base;
+	uint rx_ring_size;
+
+	struct bufdesc	*cur_rx;
+};
+
 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors. The
  * cur_rx and cur_tx point to the currently available buffer.
@@ -280,29 +358,18 @@ struct fec_enet_private {
 	struct mutex ptp_clk_mutex;

 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
-	unsigned char *tx_bounce[TX_RING_SIZE];
-	struct sk_buff *tx_skbuff[TX_RING_SIZE];
-	struct sk_buff *rx_skbuff[RX_RING_SIZE];
+	struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
+	struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];

-	/* CPM dual port RAM relative addresses */
-	dma_addr_t	bd_dma;
-	/* Address of Rx and Tx buffers */
-	struct bufdesc	*rx_bd_base;
-	struct bufdesc	*tx_bd_base;
-	/* The next free ring entry */
-	struct bufdesc	*cur_rx, *cur_tx;
-	/* The ring entries to be free()ed */
-	struct bufdesc	*dirty_tx;
+	unsigned int total_tx_ring_size;
+	unsigned int total_rx_ring_size;
+
+	unsigned long work_tx;
+	unsigned long work_rx;
+	unsigned long work_ts;
+	unsigned long work_mdio;

 	unsigned short bufdesc_size;
-	unsigned short tx_ring_size;
-	unsigned short rx_ring_size;
-	unsigned short tx_stop_threshold;
-	unsigned short tx_wake_threshold;
-
-	/* Software TSO */
-	char *tso_hdrs;
-	dma_addr_t tso_hdrs_dma;

 	struct platform_device *pdev;

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ee9f04ff18cb..4c0d2ee11428 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -72,6 +72,8 @@ static void set_multicast_list(struct net_device *ndev);

 #define DRIVER_NAME	"fec"

+#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
+
 /* Pause frame feild and FIFO threshold */
 #define FEC_ENET_FCE	(1 << 5)
 #define FEC_ENET_RSEM_V	0x84
@@ -258,22 +260,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 static int mii_cnt;

 static inline
-struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+				      struct fec_enet_private *fep,
+				      int queue_id)
 {
 	struct bufdesc *new_bd = bdp + 1;
 	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
 	struct bufdesc_ex *ex_base;
 	struct bufdesc *base;
 	int ring_size;

-	if (bdp >= fep->tx_bd_base) {
-		base = fep->tx_bd_base;
-		ring_size = fep->tx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	if (bdp >= txq->tx_bd_base) {
+		base = txq->tx_bd_base;
+		ring_size = txq->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
 	} else {
-		base = fep->rx_bd_base;
-		ring_size = fep->rx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+		base = rxq->rx_bd_base;
+		ring_size = rxq->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
 	}

 	if (fep->bufdesc_ex)
@@ -285,22 +291,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva
 }

 static inline
-struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+				      struct fec_enet_private *fep,
+				      int queue_id)
 {
 	struct bufdesc *new_bd = bdp - 1;
 	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
 	struct bufdesc_ex *ex_base;
 	struct bufdesc *base;
 	int ring_size;

-	if (bdp >= fep->tx_bd_base) {
-		base = fep->tx_bd_base;
-		ring_size = fep->tx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	if (bdp >= txq->tx_bd_base) {
+		base = txq->tx_bd_base;
+		ring_size = txq->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
 	} else {
-		base = fep->rx_bd_base;
-		ring_size = fep->rx_ring_size;
-		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+		base = rxq->rx_bd_base;
+		ring_size = rxq->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
 	}

 	if (fep->bufdesc_ex)
@@ -316,14 +326,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
 	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
 }

-static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
+					struct fec_enet_priv_tx_q *txq)
 {
 	int entries;

-	entries = ((const char *)fep->dirty_tx -
-			(const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+	entries = ((const char *)txq->dirty_tx -
+			(const char *)txq->cur_tx) / fep->bufdesc_size - 1;

-	return entries > 0 ? entries : entries + fep->tx_ring_size;
+	return entries > 0 ? entries : entries + txq->tx_ring_size;
 }

 static void *swap_buffer(void *bufaddr, int len)
@@ -340,22 +351,26 @@
 static void fec_dump(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct bufdesc *bdp = fep->tx_bd_base;
-	unsigned int index = 0;
+	struct bufdesc *bdp;
+	struct fec_enet_priv_tx_q *txq;
+	int index = 0;

 	netdev_info(ndev, "TX ring dump\n");
 	pr_info("Nr SC addr len SKB\n");

+	txq = fep->tx_queue[0];
+	bdp = txq->tx_bd_base;
+
 	do {
 		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
 			index,
-			bdp == fep->cur_tx ? 'S' : ' ',
-			bdp == fep->dirty_tx ? 'H' : ' ',
+			bdp == txq->cur_tx ? 'S' : ' ',
+			bdp == txq->dirty_tx ? 'H' : ' ',
 			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
-			fep->tx_skbuff[index]);
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+			txq->tx_skbuff[index]);
+		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 		index++;
-	} while (bdp != fep->tx_bd_base);
+	} while (bdp != txq->tx_bd_base);
 }

 static inline bool is_ipv4_pkt(struct sk_buff *skb)
@@ -381,14 +396,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 }

 static int
-fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
+fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+			     struct sk_buff *skb,
+			     struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
-	struct bufdesc *bdp = fep->cur_tx;
+	struct bufdesc *bdp = txq->cur_tx;
 	struct bufdesc_ex *ebdp;
 	int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned short queue = skb_get_queue_mapping(skb);
 	int frag, frag_len;
 	unsigned short status;
 	unsigned int estatus = 0;
@@ -400,7 +418,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)

 	for (frag = 0; frag < nr_frags; frag++) {
 		this_frag = &skb_shinfo(skb)->frags[frag];
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 		ebdp = (struct bufdesc_ex *)bdp;

 		status = bdp->cbd_sc;
@@ -428,11 +446,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)

 		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 		if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
 			id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-			memcpy(fep->tx_bounce[index], bufaddr, frag_len);
-			bufaddr = fep->tx_bounce[index];
+			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+			bufaddr = txq->tx_bounce[index];

 			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 				swap_buffer(bufaddr, frag_len);
@@ -452,21 +470,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 		bdp->cbd_sc = status;
 	}

-	fep->cur_tx = bdp;
+	txq->cur_tx = bdp;

 	return 0;

dma_mapping_error:
-	bdp = fep->cur_tx;
+	bdp = txq->cur_tx;
 	for (i = 0; i < frag; i++) {
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				bdp->cbd_datlen, DMA_TO_DEVICE);
 	}
 	return NETDEV_TX_OK;
 }

-static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+				   struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
@@ -477,12 +496,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	dma_addr_t addr;
 	unsigned short status;
 	unsigned short buflen;
+	unsigned short queue;
 	unsigned int estatus = 0;
 	unsigned int index;
 	int entries_free;
 	int ret;

-	entries_free = fec_enet_get_free_txdesc_num(fep);
+	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
 		dev_kfree_skb_any(skb);
 		if (net_ratelimit())
@@ -497,7 +517,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	}

 	/* Fill in a Tx ring entry */
-	bdp = fep->cur_tx;
+	bdp = txq->cur_tx;
 	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
@@ -505,11 +525,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	bufaddr = skb->data;
 	buflen = skb_headlen(skb);

-	index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+	queue = skb_get_queue_mapping(skb);
+	index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-		memcpy(fep->tx_bounce[index], skb->data, buflen);
-		bufaddr = fep->tx_bounce[index];
+		memcpy(txq->tx_bounce[index], skb->data, buflen);
+		bufaddr = txq->tx_bounce[index];

 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(bufaddr, buflen);
@@ -525,7 +546,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	}

 	if (nr_frags) {
-		ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
 		if (ret)
 			return ret;
 	} else {
@@ -553,10 +574,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 		ebdp->cbd_esc = estatus;
 	}

-	last_bdp = fep->cur_tx;
-	index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+	last_bdp = txq->cur_tx;
+	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
 	/* Save skb pointer */
-	fep->tx_skbuff[index] = skb;
+	txq->tx_skbuff[index] = skb;

 	bdp->cbd_datlen = buflen;
 	bdp->cbd_bufaddr = addr;
@@ -568,22 +589,23 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	bdp->cbd_sc = status;

 	/* If this was the last BD in the ring, start at the beginning again. */
-	bdp = fec_enet_get_nextdesc(last_bdp, fep);
+	bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);

 	skb_tx_timestamp(skb);

-	fep->cur_tx = bdp;
+	txq->cur_tx = bdp;

 	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

 	return 0;
 }

 static int
-fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
-			struct bufdesc *bdp, int index, char *data,
-			int size, bool last_tcp, bool is_last)
+fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+			struct net_device *ndev,
+			struct bufdesc *bdp, int index, char *data,
+			int size, bool last_tcp, bool is_last)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
@@ -600,8 +622,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,

 	if (((unsigned long) data) & FEC_ALIGNMENT ||
 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-		memcpy(fep->tx_bounce[index], data, size);
-		data = fep->tx_bounce[index];
+		memcpy(txq->tx_bounce[index], data, size);
+		data = txq->tx_bounce[index];

 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(data, size);
@@ -640,8 +662,9 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
 }

 static int
-fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
-			struct bufdesc *bdp, int index)
+fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+			struct sk_buff *skb, struct net_device *ndev,
+			struct bufdesc *bdp, int index)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
@@ -657,12 +680,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
 	status &= ~BD_ENET_TX_STATS;
 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

-	bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
-	dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
-		memcpy(fep->tx_bounce[index], skb->data, hdr_len);
-		bufaddr = fep->tx_bounce[index];
+		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
+		bufaddr = txq->tx_bounce[index];

 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(bufaddr, hdr_len);
@@ -692,17 +715,20 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
 	return 0;
 }

-static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+				   struct sk_buff *skb,
+				   struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int total_len, data_left;
-	struct bufdesc *bdp = fep->cur_tx;
+	struct bufdesc *bdp = txq->cur_tx;
+	unsigned short queue = skb_get_queue_mapping(skb);
 	struct tso_t tso;
 	unsigned int index = 0;
 	int ret;

-	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
+	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
 		dev_kfree_skb_any(skb);
 		if (net_ratelimit())
 			netdev_err(ndev, "NOT enough BD for TSO!\n");
@@ -722,14 +748,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 	while (total_len > 0) {
 		char *hdr;

-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
 		total_len -= data_left;

 		/* prepare packet headers: MAC + IP + TCP */
-		hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
-		ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
+		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
 		if (ret)
 			goto err_release;
@@ -737,10 +763,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 			int size;

 			size = min_t(int, tso.size, data_left);
-			bdp = fec_enet_get_nextdesc(bdp, fep);
-			index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
-			ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
-							size, size == data_left,
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+			index = fec_enet_get_bd_index(txq->tx_bd_base,
+						      bdp, fep);
+			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
+							bdp, index,
+							tso.data, size,
+							size == data_left,
 							total_len == 0);
 			if (ret)
 				goto err_release;
@@ -749,17 +778,17 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 			tso_build_data(skb, &tso, size);
 		}

-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 	}

 	/* Save skb pointer */
-	fep->tx_skbuff[index] = skb;
+	txq->tx_skbuff[index] = skb;

 	skb_tx_timestamp(skb);
-	fep->cur_tx = bdp;
+	txq->cur_tx = bdp;

 	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

 	return 0;
@@ -773,18 +802,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int entries_free;
+	unsigned short queue;
+	struct fec_enet_priv_tx_q *txq;
+	struct netdev_queue *nq;
 	int ret;

+	queue = skb_get_queue_mapping(skb);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(ndev, queue);
+
 	if (skb_is_gso(skb))
-		ret = fec_enet_txq_submit_tso(skb, ndev);
+		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
 	else
-		ret = fec_enet_txq_submit_skb(skb, ndev);
+		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
 	if (ret)
 		return ret;

-	entries_free = fec_enet_get_free_txdesc_num(fep);
-	if (entries_free <= fep->tx_stop_threshold)
-		netif_stop_queue(ndev);
+	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+	if (entries_free <= txq->tx_stop_threshold)
+		netif_tx_stop_queue(nq);

 	return NETDEV_TX_OK;
 }
@@ -794,46 +830,51 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 static void fec_enet_bd_init(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *bdp;
 	unsigned int i;

 	/* Initialize the receive buffer descriptors. */
-	bdp = fep->rx_bd_base;
-	for (i = 0; i < fep->rx_ring_size; i++) {
+	rxq = fep->rx_queue[0];
+	bdp = rxq->rx_bd_base;
+
+	for (i = 0; i < rxq->rx_ring_size; i++) {

 		/* Initialize the BD for every fragment in the page. */
 		if (bdp->cbd_bufaddr)
 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		else
 			bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 	}

 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
+	bdp = fec_enet_get_prevdesc(bdp, fep, 0);
 	bdp->cbd_sc |= BD_SC_WRAP;

-	fep->cur_rx = fep->rx_bd_base;
+	rxq->cur_rx = rxq->rx_bd_base;

 	/* ...and the same for transmit */
-	bdp = fep->tx_bd_base;
-	fep->cur_tx = bdp;
-	for (i = 0; i < fep->tx_ring_size; i++) {
+	txq = fep->tx_queue[0];
+	bdp = txq->tx_bd_base;
+	txq->cur_tx = bdp;
+	for (i = 0; i < txq->tx_ring_size; i++) {

 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = 0;
-		if (fep->tx_skbuff[i]) {
-			dev_kfree_skb_any(fep->tx_skbuff[i]);
-			fep->tx_skbuff[i] = NULL;
+		if (txq->tx_skbuff[i]) {
+			dev_kfree_skb_any(txq->tx_skbuff[i]);
+			txq->tx_skbuff[i] = NULL;
 		}
 		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 	}

 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
+	bdp = fec_enet_get_prevdesc(bdp, fep, 0);
 	bdp->cbd_sc |= BD_SC_WRAP;
-	fep->dirty_tx = bdp;
+	txq->dirty_tx = bdp;
 }

 /*
@@ -852,6 +893,8 @@ fec_restart(struct net_device *ndev)
 	u32 temp_mac[2];
 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
 	u32 ecntl = 0x2; /* ETHEREN */
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;

 	/* Whack a reset. We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
@@ -876,19 +919,21 @@ fec_restart(struct net_device *ndev)
 	fec_enet_bd_init(ndev);

 	/* Set receive and transmit descriptor base. */
-	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+	rxq = fep->rx_queue[0];
+	writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(0));
 	if (fep->bufdesc_ex)
-		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
+		writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc_ex)
+			* rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0));
 	else
-		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
+		writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc)
+			* rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0));

+	txq = fep->tx_queue[0];
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
-		if (fep->tx_skbuff[i]) {
-			dev_kfree_skb_any(fep->tx_skbuff[i]);
-			fep->tx_skbuff[i] = NULL;
+		if (txq->tx_skbuff[i]) {
+			dev_kfree_skb_any(txq->tx_skbuff[i]);
+			txq->tx_skbuff[i] = NULL;
 		}
 	}
@@ -1012,7 +1057,7 @@ fec_restart(struct net_device *ndev)

 	/* And last, enable the transmit and receive processing */
 	writel(ecntl, fep->hwp + FEC_ECNTRL);
-	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+	writel(0, fep->hwp + FEC_R_DES_ACTIVE(0));

 	if (fep->bufdesc_ex)
 		fec_ptp_start_cyclecounter(ndev);
@@ -1097,37 +1142,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 }

 static void
-fec_enet_tx(struct net_device *ndev)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct	fec_enet_private *fep;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
+	struct fec_enet_priv_tx_q *txq;
+	struct netdev_queue *nq;
 	int	index = 0;
 	int	entries_free;

 	fep = netdev_priv(ndev);
-	bdp = fep->dirty_tx;
+
+	queue_id = FEC_ENET_GET_QUQUE(queue_id);
+
+	txq = fep->tx_queue[queue_id];
+	/* get next bdp of dirty_tx */
+	nq = netdev_get_tx_queue(ndev, queue_id);
+	bdp = txq->dirty_tx;

 	/* get next bdp of dirty_tx */
-	bdp = fec_enet_get_nextdesc(bdp, fep);
+	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

 		/* current queue is empty */
-		if (bdp == fep->cur_tx)
+		if (bdp == txq->cur_tx)
 			break;

-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);

-		skb = fep->tx_skbuff[index];
-		fep->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
+		skb = txq->tx_skbuff[index];
+		txq->tx_skbuff[index] = NULL;
+		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
 			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 					bdp->cbd_datlen, DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
 		if (!skb) {
-			bdp = fec_enet_get_nextdesc(bdp, fep);
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 			continue;
 		}
@@ -1169,23 +1222,37 @@ fec_enet_tx(struct net_device *ndev)
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);

-		fep->dirty_tx = bdp;
+		txq->dirty_tx = bdp;

 		/* Update pointer to next buffer descriptor to be transmitted */
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

 		/* Since we have freed up a buffer, the ring is no longer full */
 		if (netif_queue_stopped(ndev)) {
-			entries_free = fec_enet_get_free_txdesc_num(fep);
-			if (entries_free >= fep->tx_wake_threshold)
-				netif_wake_queue(ndev);
+			entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+			if (entries_free >= txq->tx_wake_threshold)
+				netif_tx_wake_queue(nq);
 		}
 	}

 	/* ERR006538: Keep the transmitter going */
-	if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
-		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+	if (bdp != txq->cur_tx &&
+	    readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
+		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	u16 queue_id;
+	/* First process class A queue, then Class B and Best Effort queue */
+	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
+		clear_bit(queue_id, &fep->work_tx);
+		fec_enet_tx_queue(ndev, queue_id);
+	}
+	return;
 }

 /* During a receive, the cur_rx points to the current incoming buffer.
@@ -1194,11 +1261,12 @@ fec_enet_tx(struct net_device *ndev)
  * effectively tossing the packet.
  */
 static int
-fec_enet_rx(struct net_device *ndev, int budget)
+fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
+	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
@@ -1213,11 +1281,13 @@ fec_enet_rx(struct net_device *ndev, int budget)
#ifdef CONFIG_M532x
 	flush_cache_all();
#endif
+	queue_id = FEC_ENET_GET_QUQUE(queue_id);
+	rxq = fep->rx_queue[queue_id];

 	/* First, grab all of the stats for the incoming packet.
 	 * These get messed up if we get called due to a busy condition.
 	 */
-	bdp = fep->cur_rx;
+	bdp = rxq->cur_rx;

 	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
@@ -1231,7 +1301,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
 		if ((status & BD_ENET_RX_LAST) == 0)
 			netdev_err(ndev, "rcv is not +last\n");

-		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

 		/* Check for errors. */
 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1264,8 +1333,8 @@ fec_enet_rx(struct net_device *ndev, int budget)
 		pkt_len = bdp->cbd_datlen;
 		ndev->stats.rx_bytes += pkt_len;

-		index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
-		data = fep->rx_skbuff[index]->data;
+		index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
+		data = rxq->rx_skbuff[index]->data;
 		dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
 					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1280,7 +1349,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
 		/* If this is a VLAN packet remove the VLAN Tag */
 		vlan_packet_rcvd = false;
 		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-			fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+		    fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
 			/* Push and remove the vlan tag */
 			struct vlan_hdr *vlan_header =
 					(struct vlan_hdr *) (data + ETH_HLEN);
@@ -1308,7 +1377,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
 			skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
 			if (vlan_packet_rcvd)
 				payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
-			skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
+			skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
 						       data + payload_offset,
 						       pkt_len - 4 - (2 * ETH_ALEN));
@@ -1357,19 +1426,48 @@ rx_processing_done:
 		}

 		/* Update BD pointer to next entry */
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

 		/* Doing this here will keep the FEC running while we process
 		 * incoming frames. On a heavily loaded network, we should be
 		 * able to keep up at the expense of system resources.
 		 */
-		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+		writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
 	}
-	fep->cur_rx = bdp;
-
+	rxq->cur_rx = bdp;
 	return pkt_received;
 }

+static int
+fec_enet_rx(struct net_device *ndev, int budget)
+{
+	int pkt_received = 0;
+	u16 queue_id;
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+		clear_bit(queue_id, &fep->work_rx);
+		pkt_received += fec_enet_rx_queue(ndev,
+					budget - pkt_received, queue_id);
+	}
+	return pkt_received;
+}
+
+static bool
+fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+{
+	if (int_events == 0)
+		return false;
+
+	if (int_events & FEC_ENET_RXF)
+		fep->work_rx |= (1 << 2);
+
+	if (int_events & FEC_ENET_TXF)
+		fep->work_tx |= (1 << 2);
+
+	return true;
+}
+
 static irqreturn_t
 fec_enet_interrupt(int irq, void *dev_id)
 {
@@ -1381,6 +1479,7 @@ fec_enet_interrupt(int irq, void *dev_id)

 	int_events = readl(fep->hwp + FEC_IEVENT);
 	writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
+	fec_enet_collect_events(fep, int_events);

 	if (int_events & napi_mask) {
 		ret = IRQ_HANDLED;
@@ -2132,25 +2231,29 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;

-	bdp = fep->rx_bd_base;
-	for (i = 0; i < fep->rx_ring_size; i++) {
-		skb = fep->rx_skbuff[i];
-		fep->rx_skbuff[i] = NULL;
+	rxq = fep->rx_queue[0];
+	bdp = rxq->rx_bd_base;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		skb = rxq->rx_skbuff[i];
+		rxq->rx_skbuff[i] = NULL;
 		if (skb) {
 			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 		}
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 	}

-	bdp = fep->tx_bd_base;
-	for (i = 0; i < fep->tx_ring_size; i++) {
-		kfree(fep->tx_bounce[i]);
-		fep->tx_bounce[i] = NULL;
-		skb = fep->tx_skbuff[i];
-		fep->tx_skbuff[i] = NULL;
+	txq = fep->tx_queue[0];
+	bdp = txq->tx_bd_base;
+	for (i = 0; i < txq->tx_ring_size; i++) {
+		kfree(txq->tx_bounce[i]);
+		txq->tx_bounce[i] = NULL;
+		skb = txq->tx_skbuff[i];
+		txq->tx_skbuff[i] = NULL;
 		dev_kfree_skb(skb);
 	}
 }
@@ -2161,9 +2264,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;

-	bdp = fep->rx_bd_base;
-	for (i = 0; i < fep->rx_ring_size; i++) {
+	rxq = fep->rx_queue[0];
+	bdp = rxq->rx_bd_base;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
 		dma_addr_t addr;

 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
@@ -2179,7 +2285,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			goto err_alloc;
 		}

-		fep->rx_skbuff[i] = skb;
+		rxq->rx_skbuff[i] = skb;
 		bdp->cbd_bufaddr = addr;
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
@@ -2188,17 +2294,18 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_RX_INT;
 		}

-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 	}

 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
+	bdp = fec_enet_get_prevdesc(bdp, fep, 0);
 	bdp->cbd_sc |= BD_SC_WRAP;

-	bdp = fep->tx_bd_base;
-	for (i = 0; i < fep->tx_ring_size; i++) {
-		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
-		if (!fep->tx_bounce[i])
+	txq = fep->tx_queue[0];
+	bdp = txq->tx_bd_base;
+	for (i = 0; i < txq->tx_ring_size; i++) {
+		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+		if (!txq->tx_bounce[i])
 			goto err_alloc;

 		bdp->cbd_sc = 0;
@@ -2209,11 +2316,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}

-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 	}

 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
+	bdp = fec_enet_get_prevdesc(bdp, fep, 0);
 	bdp->cbd_sc |= BD_SC_WRAP;

 	return 0;
@@ -2252,7 +2359,8 @@ fec_enet_open(struct net_device *ndev)
 	fec_restart(ndev);
 	napi_enable(&fep->napi);
 	phy_start(fep->phy_dev);
-	netif_start_queue(ndev);
+	netif_tx_start_all_queues(ndev);
+
 	return 0;
 }
@@ -2426,7 +2534,7 @@ static int fec_set_features(struct net_device *netdev,
 	/* Resume the device after updates */
 	if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
 		fec_restart(netdev);
-		netif_wake_queue(netdev);
+		netif_tx_wake_all_queues(netdev);
 		netif_tx_unlock_bh(netdev);
 		napi_enable(&fep->napi);
 	}
@@ -2434,10 +2542,17 @@ static int fec_set_features(struct net_device *netdev,
 	return 0;
 }

+u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+			  void *accel_priv, select_queue_fallback_t fallback)
+{
+	return skb_tx_hash(ndev, skb);
+}
+
 static const struct net_device_ops fec_netdev_ops = {
 	.ndo_open		= fec_enet_open,
 	.ndo_stop		= fec_enet_close,
 	.ndo_start_xmit		= fec_enet_start_xmit,
+	.ndo_select_queue	= fec_enet_select_queue,
 	.ndo_set_rx_mode	= set_multicast_list,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -2459,39 +2574,60 @@ static int fec_enet_init(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *cbd_base;
+	dma_addr_t bd_dma;
 	int bd_size;

-	/* init the tx & rx ring size */
-	fep->tx_ring_size = TX_RING_SIZE;
-	fep->rx_ring_size = RX_RING_SIZE;
+	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+	if (!txq)
+		return -ENOMEM;
+	fep->tx_queue[0] = txq;

-	fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
-	fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
+	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+	if (!rxq) {
+		kfree(txq);
+		return -ENOMEM;
+	}
+
+	fep->rx_queue[0] = rxq;
+
+
+	txq->tx_ring_size = TX_RING_SIZE;
+	rxq->rx_ring_size = RX_RING_SIZE;
+	fep->total_tx_ring_size = txq->tx_ring_size;
+	fep->total_rx_ring_size = rxq->rx_ring_size;
+
+	txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+	txq->tx_wake_threshold = (txq->tx_ring_size - txq->tx_stop_threshold) / 2;

 	if (fep->bufdesc_ex)
 		fep->bufdesc_size = sizeof(struct bufdesc_ex);
 	else
 		fep->bufdesc_size = sizeof(struct bufdesc);
-	bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
+	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
 			fep->bufdesc_size;

 	/* Allocate memory for buffer descriptors. */
-	cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
+	cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
 				      GFP_KERNEL);
-	if (!cbd_base)
-		return -ENOMEM;
-
-	fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
-						&fep->tso_hdrs_dma, GFP_KERNEL);
-	if (!fep->tso_hdrs) {
-		dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
+	if (!cbd_base) {
+		kfree(rxq);
+		kfree(txq);
 		return -ENOMEM;
 	}

-	memset(cbd_base, 0, PAGE_SIZE);
+	txq->tso_hdrs = dma_alloc_coherent(NULL, txq->tx_ring_size * TSO_HEADER_SIZE,
+						&txq->tso_hdrs_dma, GFP_KERNEL);
+	if (!txq->tso_hdrs) {
+		kfree(rxq);
+		kfree(txq);
+		dma_free_coherent(NULL, bd_size, cbd_base, bd_dma);
+		return -ENOMEM;
+	}

-	fep->netdev = ndev;
+	memset(cbd_base, 0, bd_size);

 	/* Get the Ethernet address */
 	fec_get_mac(ndev);
@@ -2499,12 +2635,13 @@ static int fec_enet_init(struct net_device *ndev)
 	fec_set_mac_address(ndev, NULL);

 	/* Set receive and transmit descriptor base. */
-	fep->rx_bd_base = cbd_base;
+	rxq->rx_bd_base = cbd_base;
 	if (fep->bufdesc_ex)
-		fep->tx_bd_base = (struct bufdesc *)
-			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
+		txq->tx_bd_base = (struct bufdesc *)
+			(((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
 	else
-		fep->tx_bd_base = cbd_base + fep->rx_ring_size;
+		txq->tx_bd_base = cbd_base + rxq->rx_ring_size;
+

 	/* The FEC Ethernet specific entries in the device structure */
 	ndev->watchdog_timeo = TX_TIMEOUT;
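With the per-queue structures in place, the hot paths resolve the
target ring from the skb instead of using the old single-ring fields.
A condensed sketch of the transmit-side lookup as implemented above
(not a complete function, just the ring-selection steps):

	unsigned short queue = skb_get_queue_mapping(skb);
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
	struct netdev_queue *nq = netdev_get_tx_queue(ndev, queue);

	/* submit the skb to txq's ring, then kick the matching
	 * per-ring descriptor-active register
	 */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

	/* flow control is now per netdev queue, not per device */
	if (fec_enet_get_free_txdesc_num(fep, txq) <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);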
From 9fc095f136b5436fafb22f31c4871b5edcb362b9 Mon Sep 17 00:00:00 2001
From: Fugang Duan
Date: Sat, 13 Sep 2014 05:00:49 +0800
Subject: [PATCH 04/12] net: fec: parse max queue number from dt file

By default, the tx/rx queue number is 1; the user can configure the
queue numbers in the DTS file like this:
	fsl,num-tx-queues=<3>;
	fsl,num-rx-queues=<3>;

Since the i.MX6SX ENET-AVB IP supports multiple queues, use the
multi-queue interface to allocate and set up the Ethernet device.

Signed-off-by: Fugang Duan
Signed-off-by: Frank Li
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/freescale/fec.h      |  2 +
 drivers/net/ethernet/freescale/fec_main.c | 46 ++++++++++++++++++++++-
 2 files changed, 47 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index b2b91f8826a2..72fb90f6ec18 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -356,6 +356,8 @@ struct fec_enet_private {
 	bool ptp_clk_on;
 	struct mutex ptp_clk_mutex;

+	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
 	struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4c0d2ee11428..2240df0abbaa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2709,6 +2709,42 @@ static void fec_reset_phy(struct platform_device *pdev)
 }
#endif /* CONFIG_OF */

+static void
+fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int err;
+
+	*num_tx = *num_rx = 1;
+
+	if (!np || !of_device_is_available(np))
+		return;
+
+	/* parse the num of tx and rx queues */
+	err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
+	err |= of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
+	if (err) {
+		*num_tx = 1;
+		*num_rx = 1;
+		return;
+	}
+
+	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
+		dev_err(&pdev->dev, "Invalidate num_tx(=%d), fail back to 1\n",
+			*num_tx);
+		*num_tx = 1;
+		return;
+	}
+
+	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
+		dev_err(&pdev->dev, "Invalidate num_rx(=%d), fail back to 1\n",
+			*num_rx);
+		*num_rx = 1;
+		return;
+	}
+
+}
+
 static int
 fec_probe(struct platform_device *pdev)
 {
@@ -2720,13 +2756,18 @@ fec_probe(struct platform_device *pdev)
 	const struct of_device_id *of_id;
 	static int dev_id;
 	struct device_node *np = pdev->dev.of_node, *phy_node;
+	int num_tx_qs = 1;
+	int num_rx_qs = 1;

 	of_id = of_match_device(fec_dt_ids, &pdev->dev);
 	if (of_id)
 		pdev->id_entry = of_id->data;

+	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+
 	/* Init network device */
-	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
+	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
+				  num_tx_qs, num_rx_qs);
 	if (!ndev)
 		return -ENOMEM;

@@ -2735,6 +2776,9 @@ fec_probe(struct platform_device *pdev)
 	/* setup board info structure */
 	fep = netdev_priv(ndev);

+	fep->num_rx_queues = num_rx_qs;
+	fep->num_tx_queues = num_tx_qs;
+
#if !defined(CONFIG_M5272)
 	/* default enable pause frame auto negotiation */
 	if (pdev->id_entry &&
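With this patch a board device tree can request the extra rings;
values that are missing or outside 1..3 fall back to one queue. An
illustrative node fragment follows — the &fec1 label and the rest of
the node are board-specific assumptions, only the two fsl,* properties
come from this patch:

	&fec1 {
		fsl,num-tx-queues = <3>;
		fsl,num-rx-queues = <3>;
		status = "okay";
	};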
*/ - if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; - else - bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep, 0); - } - - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, 0); - bdp->cbd_sc |= BD_SC_WRAP; - - rxq->cur_rx = rxq->rx_bd_base; - - /* ...and the same for transmit */ - txq = fep->tx_queue[0]; - bdp = txq->tx_bd_base; - txq->cur_tx = bdp; - - for (i = 0; i < txq->tx_ring_size; i++) { - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - if (txq->tx_skbuff[i]) { - dev_kfree_skb_any(txq->tx_skbuff[i]); - txq->tx_skbuff[i] = NULL; + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); } - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep, 0); + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + + rxq->cur_rx = rxq->rx_bd_base; } - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, 0); - bdp->cbd_sc |= BD_SC_WRAP; - txq->dirty_tx = bdp; + for (q = 0; q < fep->num_tx_queues; q++) { + /* ...and the same for transmit */ + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + txq->cur_tx = bdp; + + for (i = 0; i < txq->tx_ring_size; i++) { + /* Initialize the BD for every fragment in the page. */ + bdp->cbd_sc = 0; + if (txq->tx_skbuff[i]) { + dev_kfree_skb_any(txq->tx_skbuff[i]); + txq->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + txq->dirty_tx = bdp; + } +} + +static void fec_enet_enable_ring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + int i; + + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(RCMR_MATCHEN | RCMR_CMP(i), + fep->hwp + FEC_RCMR(i)); + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(DMA_CLASS_EN | IDLE_SLOPE(i), + fep->hwp + FEC_DMA_CFG(i)); + } +} + +static void fec_enet_reset_skb(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + int i, j; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + + for (j = 0; j < txq->tx_ring_size; j++) { + if (txq->tx_skbuff[j]) { + dev_kfree_skb_any(txq->tx_skbuff[j]); + txq->tx_skbuff[j] = NULL; + } + } + } } /* @@ -893,8 +944,6 @@ fec_restart(struct net_device *ndev) u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - struct fec_enet_priv_tx_q *txq; - struct fec_enet_priv_rx_q *rxq; /* Whack a reset. We should wait for this. */ writel(1, fep->hwp + FEC_ECNTRL); @@ -918,24 +967,10 @@ fec_restart(struct net_device *ndev) fec_enet_bd_init(ndev); - /* Set receive and transmit descriptor base. 
*/ - rxq = fep->rx_queue[0]; - writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(0)); - if (fep->bufdesc_ex) - writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc_ex) - * rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0)); - else - writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc) - * rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0)); + fec_enet_enable_ring(ndev); - - txq = fep->tx_queue[0]; - for (i = 0; i <= TX_RING_MOD_MASK; i++) { - if (txq->tx_skbuff[i]) { - dev_kfree_skb_any(txq->tx_skbuff[i]); - txq->tx_skbuff[i] = NULL; - } - } + /* Reset tx SKB buffers. */ + fec_enet_reset_skb(ndev); /* Enable MII mode */ if (fep->full_duplex == DUPLEX_FULL) { @@ -1057,7 +1092,8 @@ fec_restart(struct net_device *ndev) /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); - writel(0, fep->hwp + FEC_R_DES_ACTIVE(0)); + for (i = 0; i < fep->num_rx_queues; i++) + writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev); @@ -2233,41 +2269,122 @@ static void fec_enet_free_buffers(struct net_device *ndev) struct bufdesc *bdp; struct fec_enet_priv_tx_q *txq; struct fec_enet_priv_rx_q *rxq; + unsigned int q; - rxq = fep->rx_queue[0]; - bdp = rxq->rx_bd_base; - for (i = 0; i < rxq->rx_ring_size; i++) { - skb = rxq->rx_skbuff[i]; - rxq->rx_skbuff[i] = NULL; - if (skb) { - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - dev_kfree_skb(skb); + for (q = 0; q < fep->num_rx_queues; q++) { + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { + skb = rxq->rx_skbuff[i]; + rxq->rx_skbuff[i] = NULL; + if (skb) { + dma_unmap_single(&fep->pdev->dev, + bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } + bdp = fec_enet_get_nextdesc(bdp, fep, q); } - bdp = fec_enet_get_nextdesc(bdp, fep, 0); } - txq = fep->tx_queue[0]; - bdp = txq->tx_bd_base; - for (i = 0; i < txq->tx_ring_size; i++) { - kfree(txq->tx_bounce[i]); - txq->tx_bounce[i] = NULL; - skb = txq->tx_skbuff[i]; - txq->tx_skbuff[i] = NULL; - dev_kfree_skb(skb); + for (q = 0; q < fep->num_tx_queues; q++) { + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + kfree(txq->tx_bounce[i]); + txq->tx_bounce[i] = NULL; + skb = txq->tx_skbuff[i]; + txq->tx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } } } -static int fec_enet_alloc_buffers(struct net_device *ndev) +static void fec_enet_free_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { + txq = fep->tx_queue[i]; + dma_free_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, + txq->tso_hdrs_dma); + } + + for (i = 0; i < fep->num_rx_queues; i++) + if (fep->rx_queue[i]) + kfree(fep->rx_queue[i]); + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i]) + kfree(fep->tx_queue[i]); +} + +static int fec_enet_alloc_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + int ret = 0; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->tx_queue[i] = txq; + txq->tx_ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + + txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; + 
txq->tx_wake_threshold = + (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + + txq->tso_hdrs = dma_alloc_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, + GFP_KERNEL); + if (!txq->tso_hdrs) { + ret = -ENOMEM; + goto alloc_failed; + } + } + + for (i = 0; i < fep->num_rx_queues; i++) { + fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), + GFP_KERNEL); + if (!fep->rx_queue[i]) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; + } + return ret; + +alloc_failed: + fec_enet_free_queue(ndev); + return ret; +} + +static int +fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; - struct fec_enet_priv_tx_q *txq; struct fec_enet_priv_rx_q *rxq; - rxq = fep->rx_queue[0]; + rxq = fep->rx_queue[queue]; bdp = rxq->rx_bd_base; for (i = 0; i < rxq->rx_ring_size; i++) { dma_addr_t addr; @@ -2294,14 +2411,28 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_RX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep, 0); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep, 0); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; + return 0; - txq = fep->tx_queue[0]; + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int +fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + + txq = fep->tx_queue[queue]; bdp = txq->tx_bd_base; for (i = 0; i < txq->tx_ring_size; i++) { txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); @@ -2316,11 +2447,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep, 0); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. 
*/ - bdp = fec_enet_get_prevdesc(bdp, fep, 0); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -2330,6 +2461,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) return -ENOMEM; } +static int fec_enet_alloc_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < fep->num_rx_queues; i++) + if (fec_enet_alloc_rxq_buffers(ndev, i)) + return -ENOMEM; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fec_enet_alloc_txq_buffers(ndev, i)) + return -ENOMEM; + return 0; +} + static int fec_enet_open(struct net_device *ndev) { @@ -2579,28 +2725,9 @@ static int fec_enet_init(struct net_device *ndev) struct bufdesc *cbd_base; dma_addr_t bd_dma; int bd_size; + unsigned int i; - txq = kzalloc(sizeof(*txq), GFP_KERNEL); - if (!txq) - return -ENOMEM; - fep->tx_queue[0] = txq; - - rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); - if (!rxq) { - kfree(txq); - return -ENOMEM; - } - - fep->rx_queue[0] = rxq; - - - txq->tx_ring_size = TX_RING_SIZE; - rxq->rx_ring_size = RX_RING_SIZE; - fep->total_tx_ring_size = txq->tx_ring_size; - fep->total_rx_ring_size = rxq->rx_ring_size; - - txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; - txq->tx_wake_threshold = (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + fec_enet_alloc_queue(ndev); if (fep->bufdesc_ex) fep->bufdesc_size = sizeof(struct bufdesc_ex); @@ -2613,17 +2740,6 @@ static int fec_enet_init(struct net_device *ndev) cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, GFP_KERNEL); if (!cbd_base) { - kfree(rxq); - kfree(txq); - return -ENOMEM; - } - - txq->tso_hdrs = dma_alloc_coherent(NULL, txq->tx_ring_size * TSO_HEADER_SIZE, - &txq->tso_hdrs_dma, GFP_KERNEL); - if (!txq->tso_hdrs) { - kfree(rxq); - kfree(txq); - dma_free_coherent(NULL, bd_size, cbd_base, bd_dma); return -ENOMEM; } @@ -2635,12 +2751,35 @@ static int fec_enet_init(struct net_device *ndev) fec_set_mac_address(ndev, NULL); /* Set receive and transmit descriptor base. */ - rxq->rx_bd_base = cbd_base; - if (fep->bufdesc_ex) - txq->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); - else - txq->tx_bd_base = cbd_base + rxq->rx_ring_size; + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + rxq->index = i; + rxq->rx_bd_base = (struct bufdesc *)cbd_base; + rxq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; + cbd_base += rxq->rx_ring_size; + } + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + txq->index = i; + txq->tx_bd_base = (struct bufdesc *)cbd_base; + txq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; + cbd_base += txq->tx_ring_size; + } + } /* The FEC Ethernet specific entries in the device structure */ From 106c314c7a765d3c472f264e0915de6180922cda Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Sat, 13 Sep 2014 05:00:51 +0800 Subject: [PATCH 06/12] net:fec: Disable enet-avb MAC instead of reset MAC The i.MX6SX ENET sits on the AXI bus; resetting the MAC can hang the system bus if the ENET-AXI bus has a pending access (the AHB bus does not have this issue).
So, for the ENET with the AVB MAC, disable the MAC instead of resetting it. Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 26 +++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 03972f7e1ba9..5f8e997defad 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -945,9 +945,16 @@ fec_restart(struct net_device *ndev) u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * On the i.MX6SX SoC the ENET uses the AXI bus, so disable + * the MAC instead of resetting it. + */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } /* * enet-mac reset will reset mac address registers too, @@ -1118,9 +1125,16 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * On the i.MX6SX SoC the ENET uses the AXI bus, so disable + * the MAC instead of resetting it. + */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); From ce99d0d3abba0faf796a6ce999e4f0356c7b4653 Mon Sep 17 00:00:00 2001 From: Frank Li Date: Sat, 13 Sep 2014 05:00:52 +0800 Subject: [PATCH 07/12] net: fec: add enet-avb IP support The i.MX6SX enet-avb supports 3 tx queues and 3 rx queues. For tx queues: ring 0 -> best effort; ring 1 -> Class A; ring 2 -> Class B. For rx queues: ring 0 -> best effort; ring 1 -> receives VLAN packets with a classification match; ring 2 -> receives VLAN packets with a classification match. Add enet-avb IP multiqueue support to the driver. Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: David S. Miller
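[Editor's illustration, not part of the patch: a minimal sketch of how a transmit path could steer traffic onto the class rings described above. The helper name and the priority-to-ring mapping are assumptions; this series does not itself add a queue-selection callback.]

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>

/* Hypothetical sketch: map a packet's priority onto the i.MX6SX AVB rings.
 * Rings 1 and 2 would carry SR Class A/B streams; everything else stays on
 * ring 0 (best effort). The priority values chosen here are assumptions.
 */
static u16 example_avb_select_queue(struct net_device *ndev,
				    struct sk_buff *skb)
{
	switch (skb->priority) {
	case TC_PRIO_INTERACTIVE:	/* treat as SR Class A traffic */
		return 1;
	case TC_PRIO_INTERACTIVE_BULK:	/* treat as SR Class B traffic */
		return 2;
	default:
		return 0;		/* best effort */
	}
}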
--- drivers/net/ethernet/freescale/fec.h | 33 +++++++++++++++++++++ drivers/net/ethernet/freescale/fec_main.c | 36 +++++++++++------------ 2 files changed, 51 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 72fb90f6ec18..5ec382887d4a 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -38,6 +38,12 @@ #define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ #define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ #define FEC_OPD 0x0ec /* Opcode + Pause duration */ +#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */ +#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */ +#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */ +#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */ +#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */ +#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */ #define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */ #define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */ #define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */ @@ -65,6 +71,7 @@ #define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */ #define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */ #define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */ +#define FEC_QOS_SCHEME 0x1f0 /* Set multi-queue QoS scheme */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ @@ -305,6 +312,32 @@ struct bufdesc_ex { #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +/* Interrupt events/masks.
*/ +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ +#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */ +#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */ +#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */ +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ +#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */ +#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */ +#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */ +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ +#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) +#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_TS_AVAIL ((uint)0x00010000) +#define FEC_ENET_TS_TIMER ((uint)0x00008000) + +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) + +#define FEC_VLAN_TAG_LEN 0x04 +#define FEC_ETHTYPE_LEN 0x02 + struct fec_enet_priv_tx_q { int index; unsigned char *tx_bounce[TX_RING_SIZE]; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5f8e997defad..658b0b344bd4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -193,21 +193,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif #endif /* CONFIG_M5272 */ -/* Interrupt events/masks. */ -#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ -#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ -#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ -#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ -#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ -#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ -#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ -#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ -#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ -#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ - -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) -#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) - /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 
*/ #define PKT_MAXBUF_SIZE 1522 @@ -882,6 +867,15 @@ static void fec_enet_bd_init(struct net_device *dev) } } +static void fec_enet_active_rxring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + + for (i = 0; i < fep->num_rx_queues; i++) + writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); +} + static void fec_enet_enable_ring(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -939,7 +933,6 @@ fec_restart(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - int i; u32 val; u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; @@ -1099,8 +1092,7 @@ /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); - for (i = 0; i < fep->num_rx_queues; i++) - writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); + fec_enet_active_rxring(ndev); if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev); @@ -1511,9 +1503,17 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) if (int_events & FEC_ENET_RXF) fep->work_rx |= (1 << 2); + if (int_events & FEC_ENET_RXF_1) + fep->work_rx |= (1 << 0); + if (int_events & FEC_ENET_RXF_2) + fep->work_rx |= (1 << 1); if (int_events & FEC_ENET_TXF) fep->work_tx |= (1 << 2); + if (int_events & FEC_ENET_TXF_1) + fep->work_tx |= (1 << 0); + if (int_events & FEC_ENET_TXF_2) + fep->work_tx |= (1 << 1); return true; } From ba593e00e645a8522a97a14a90a176d53629976e Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Sat, 13 Sep 2014 05:00:53 +0800 Subject: [PATCH 08/12] net:fec: Add fsl,imx6sx-fec compatible strings Add compatible string "fsl,imx6sx-fec" for i.MX6SX. Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 658b0b344bd4..0cc7313944f5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -157,6 +157,7 @@ enum imx_fec_type { IMX28_FEC, IMX6Q_FEC, MVF600_FEC, + IMX6SX_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -165,6 +166,7 @@ { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); From 41ef84ce4c7231ecdf6f116f03635d2a184bc5ba Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Sat, 13 Sep 2014 05:00:54 +0800 Subject: [PATCH 09/12] net: fec: change FEC alignment according to i.MX6SX requirement The i.MX6SX changes the FEC alignment requirement: its internal bus moved from AHB to AXI, which requires RX buffers to be 64-byte aligned and removes the TX buffer alignment requirement. Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/fec.h | 3 ++ drivers/net/ethernet/freescale/fec_main.c | 35 ++++++++++++++++------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 5ec382887d4a..b7c77229f1e9 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -443,6 +443,9 @@ struct fec_enet_private { int hwts_tx_en; struct delayed_work time_keep; struct regulator *reg_phy; + + unsigned int tx_align; + unsigned int rx_align; }; void fec_ptp_init(struct platform_device *pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0cc7313944f5..9840a10d07c6 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -64,12 +64,6 @@ static void set_multicast_list(struct net_device *ndev); -#if defined(CONFIG_ARM) -#define FEC_ALIGNMENT 0xf -#else -#define FEC_ALIGNMENT 0x3 -#endif - #define DRIVER_NAME "fec" #define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) @@ -434,7 +428,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], bufaddr, frag_len); bufaddr = txq->tx_bounce[index]; @@ -514,7 +508,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, queue = skb_get_queue_mapping(skb); index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, buflen); bufaddr = txq->tx_bounce[index]; @@ -607,7 +601,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - if (((unsigned long) data) & FEC_ALIGNMENT || + if (((unsigned long) data) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], data, size); data = txq->tx_bounce[index]; @@ -669,7 +663,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + if (((unsigned long)bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, hdr_len); bufaddr = txq->tx_bounce[index]; @@ -2399,6 +2393,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) struct sk_buff *skb; struct bufdesc *bdp; struct fec_enet_priv_rx_q *rxq; + unsigned int off; rxq = fep->rx_queue[queue]; bdp = rxq->rx_bd_base; @@ -2409,8 +2404,13 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) if (!skb) goto err_alloc; + off = ((unsigned long)skb->data) & fep->rx_align; + if (off) + skb_reserve(skb, fep->rx_align + 1 - off); + addr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) { dev_kfree_skb(skb); if (net_ratelimit()) @@ -2743,6 +2743,14 @@ static int fec_enet_init(struct net_device *ndev) int bd_size; unsigned int 
i; +#if defined(CONFIG_ARM) + fep->rx_align = 0xf; + fep->tx_align = 0xf; +#else + fep->rx_align = 0x3; + fep->tx_align = 0x3; +#endif + fec_enet_alloc_queue(ndev); if (fep->bufdesc_ex) @@ -2819,6 +2827,11 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + fep->tx_align = 0; + fep->rx_align = 0x3f; + } + ndev->hw_features = ndev->features; fec_restart(ndev); From b4d39b53c2150439afbbeec418221ab96f432703 Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Sat, 13 Sep 2014 05:00:55 +0800 Subject: [PATCH 10/12] net: fec: init completion variable early to avoid kernel dump Software clears the MDIO interrupt before each MDIO bus access, but the MAC can still generate an MDIO interrupt before the completion has been initialized. The issue only happens on the imx6slx chip. (See the ordering sketch at the end of this document.) CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.17.0-rc1-00399-g0bcad17 #315 Backtrace: [<800121fc>] (dump_backtrace) from [<800124e0>] (show_stack+0x18/0x1c) r6:8096e534 r5:8096e534 r4:00000000 r3:00000000 [<800124c8>] (show_stack) from [<806a4c60>] (dump_stack+0x8c/0xa4) [<806a4bd4>] (dump_stack) from [<80060ab8>] (__lock_acquire+0x1814/0x1c40) r6:be078000 r5:be074000 r4:be03f6e4 r3:be078000 [<8005f2a4>] (__lock_acquire) from [<800616e0>] (lock_acquire+0x70/0x84) r10:809ada33 r9:be010600 r8:00000096 r7:00000001 r6:be074000 r5:00000000 r4:60000193 [<80061670>] (lock_acquire) from [<806abb20>] (_raw_spin_lock_irqsave+0x40/0x54) r7:00000000 r6:8005a3f8 r5:00000193 r4:be03f6d4 [<806abae0>] (_raw_spin_lock_irqsave) from [<8005a3f8>] (complete+0x1c/0x4c) r6:80950904 r5:be03f6d0 r4:be03f6d4 [<8005a3dc>] (complete) from [<8041b4c0>] (fec_enet_interrupt+0x128/0x164) r6:80950904 r5:00800000 r4:be03f000 r3:00000000 [<8041b398>] (fec_enet_interrupt) from [<8006aeac>] (handle_irq_event_percpu+0x38/0x13c) r6:00000000 r5:be01065c r4:be399e00 r3:8041b398 [<8006ae74>] (handle_irq_event_percpu) from [<8006aff4>] (handle_irq_event+0x44/0x64) r10:be03f000 r9:80989fe0 r8:00000000 r7:00000096 r6:be399e00 r5:be01065c r4:be010600 [<8006afb0>] (handle_irq_event) from [<8006e3e8>] (handle_fasteoi_irq+0xc8/0x1bc) r6:8096e764 r5:be01065c r4:be010600 r3:00000000 [<8006e320>] (handle_fasteoi_irq) from [<8006a63c>] (generic_handle_irq+0x30/0x44) r6:be074010 r5:80945e4c r4:00000096 r3:8006e320 [<8006a60c>] (generic_handle_irq) from [<8000f218>] (handle_IRQ+0x54/0xbc) r4:80950d74 r3:00000180 [<8000f1c4>] (handle_IRQ) from [<800086cc>] (gic_handle_irq+0x30/0x68) r8:be3ab478 r7:c080e100 r6:be075bd8 r5:80950eec r4:c080e10c r3:000000a0 [<8000869c>] (gic_handle_irq) from [<80013064>] (__irq_svc+0x44/0x5c) Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9840a10d07c6..8f8e55ea7f85 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3066,6 +3066,7 @@ fec_probe(struct platform_device *pdev) goto failed_irq; } + init_completion(&fep->mdio_done); ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; From 08313641e09326ace2b91bc461b1456c763c0d8b Mon Sep 17 00:00:00 2001 From: Frank Li Date: Sat, 13 Sep 2014 05:00:56 +0800 Subject: [PATCH 11/12] ARM: Documentation: Update fec dts binding doc This patch updates the fec devicetree binding doc, adding the optional properties "fsl,num-tx-queues" and "fsl,num-rx-queues". Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: David S. Miller
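[Editor's illustration, not part of the patch: a minimal sketch of how a probe path might consume the two properties documented in the diff below, defaulting to a single queue when a property is absent. The helper name is hypothetical; of_property_read_u32() is the standard OF accessor.]

#include <linux/of.h>
#include <linux/types.h>

/* Hypothetical helper: read the queue counts described by this binding,
 * falling back to one queue when a property is missing, matching the
 * binding text below.
 */
static void example_fec_get_queue_num(struct device_node *np,
				      u32 *num_tx, u32 *num_rx)
{
	*num_tx = 1;
	*num_rx = 1;

	/* of_property_read_u32() returns 0 on success and leaves the
	 * output untouched on failure, so the defaults survive.
	 */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
}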
--- Documentation/devicetree/bindings/net/fsl-fec.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index 8a2c7b55ec16..0c8775c45798 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -16,6 +16,12 @@ Optional properties: - phy-handle : phandle to the PHY device connected to this device. - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. Use instead of phy-handle. +- fsl,num-tx-queues : Valid for the enet-avb IP, which supports hw multi + queues. Specifies the number of tx queues; if absent, the tx queue + number defaults to 1. +- fsl,num-rx-queues : Valid for the enet-avb IP, which supports hw multi + queues. Specifies the number of rx queues; if absent, the rx queue + number defaults to 1. Optional subnodes: - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes From 0afdfe951989aec4528a88213b1e1b1b595feae0 Mon Sep 17 00:00:00 2001 From: Frank Li Date: Sat, 13 Sep 2014 05:00:57 +0800 Subject: [PATCH 12/12] ARM: dts: imx6sx: add multi-queue support for enet Enable 3-queue support for the ethernet controller. Signed-off-by: Frank Li Signed-off-by: David S. Miller --- arch/arm/boot/dts/imx6sx.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index f4b9da65bc0f..0a03260c1707 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -776,6 +776,8 @@ <&clks IMX6SX_CLK_ENET_PTP>; clock-names = "ipg", "ahb", "ptp", "enet_clk_ref", "enet_out"; + fsl,num-tx-queues = <3>; + fsl,num-rx-queues = <3>; status = "disabled"; };
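[Editor's illustration for patch 10 above: the kernel dump comes from complete() taking the spinlock of an uninitialized completion. A minimal sketch of the ordering rule that patch enforces follows; the function name is hypothetical, fec_enet_interrupt() is the driver's existing handler, and probe error handling is omitted.]

#include <linux/completion.h>
#include <linux/interrupt.h>

/* Initialize the completion before any interrupt that may complete() it
 * can be delivered; otherwise an early MDIO event hits an uninitialized
 * wait-queue spinlock, as in the backtrace above.
 */
static int example_probe_irq_ordering(struct fec_enet_private *fep,
				      unsigned int irq, void *dev_id)
{
	init_completion(&fep->mdio_done);	/* must precede request_irq() */

	return request_irq(irq, fec_enet_interrupt, 0, "fec", dev_id);
}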