/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>      /* printk() */
#include <linux/slab.h>        /* kmalloc() */
#include <linux/errno.h>       /* error codes */
#include <linux/types.h>       /* size_t */
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/irq.h>
#include <linux/netdevice.h>   /* struct device, and other headers */
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/io.h>
#include <linux/ctype.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/tick.h>

#include <asm/checksum.h>
#include <asm/homecache.h>
#include <gxio/mpipe.h>
#include <arch/sim.h>
/* Default transmit lockup timeout period, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* The maximum number of distinct channels (idesc.channel is 5 bits). */
#define TILE_NET_CHANNELS 32

/* Maximum number of idescs to handle per "poll". */
#define TILE_NET_BATCH 128

/* Maximum number of packets to handle per "poll". */
#define TILE_NET_WEIGHT 64

/* Number of entries in each iqueue. */
#define IQUEUE_ENTRIES 512

/* Number of entries in each equeue. */
#define EQUEUE_ENTRIES 2048

/* Total header bytes per equeue slot.  Must be big enough for 2 bytes
 * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
 * 60 bytes of actual TCP header.  We round up to align to cache lines.
 */
#define HEADER_BYTES 128
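/* (2 + 14 + 60 = 76 bytes, rounded up to two 64-byte cache lines.) */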

/* Maximum completions per cpu per device (must be a power of two).
 * ISSUE: What is the right number here?  If this is too small, then
 * egress might block waiting for free space in a completions array.
 * ISSUE: At the least, allocate these only for initialized echannels.
 */
#define TILE_NET_MAX_COMPS 64

#define MAX_FRAGS (MAX_SKB_FRAGS + 1)

/* The "kinds" of buffer stacks (small/large/jumbo). */
#define MAX_KINDS 3

/* Size of completions data to allocate.
 * ISSUE: Probably more than needed since we don't use all the channels.
 */
#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))

/* Size of NotifRing data to allocate. */
#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))

/* Timeout to wake the per-device TX timer after we stop the queue.
 * We don't want the timeout too short (adds overhead, and might end
 * up causing stop/wake/stop/wake cycles) or too long (affects performance).
 * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
 */
#define TX_TIMER_DELAY_USEC 30

/* Timeout to wake the per-cpu egress timer to free completions. */
#define EGRESS_TIMER_DELAY_USEC 1000

MODULE_AUTHOR("Tilera Corporation");
MODULE_LICENSE("GPL");

/* A "packet fragment" (a chunk of memory). */
struct frag {
	void *buf;
	size_t length;
};

/* A single completion. */
struct tile_net_comp {
	/* The "complete_count" when the completion will be complete. */
	s64 when;
	/* The buffer to be freed when the completion is complete. */
	struct sk_buff *skb;
};

/* The completions for a given cpu and echannel. */
struct tile_net_comps {
	/* The completions. */
	struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
	/* The number of completions used. */
	unsigned long comp_next;
	/* The number of completions freed. */
	unsigned long comp_last;
};
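/* Note: "comp_next" and "comp_last" are free-running counters; slot
 * "counter % TILE_NET_MAX_COMPS" holds the entry, so the queue size
 * must be a power of two for the mapping to stay consistent as the
 * counters wrap.  Outstanding completions are [comp_last, comp_next).
 */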

/* The transmit wake timer for a given cpu and echannel. */
struct tile_net_tx_wake {
	int tx_queue_idx;
	struct hrtimer timer;
	struct net_device *dev;
};

/* Info for a specific cpu. */
struct tile_net_info {
	/* Our cpu. */
	int my_cpu;
	/* A timer for handling egress completions. */
	struct hrtimer egress_timer;
	/* True if "egress_timer" is scheduled. */
	bool egress_timer_scheduled;
	struct info_mpipe {
		/* Packet queue. */
		gxio_mpipe_iqueue_t iqueue;
		/* The NAPI struct. */
		struct napi_struct napi;
		/* Number of buffers (by kind) which must still be provided. */
		unsigned int num_needed_buffers[MAX_KINDS];
		/* instance id. */
		int instance;
		/* True if iqueue is valid. */
		bool has_iqueue;
		/* NAPI flags. */
		bool napi_added;
		bool napi_enabled;
		/* Comps for each egress channel. */
		struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
		/* Transmit wake timer for each egress channel. */
		struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
	} mpipe[NR_MPIPE_MAX];
};

/* Info for egress on a particular egress channel. */
struct tile_net_egress {
	/* The "equeue". */
	gxio_mpipe_equeue_t *equeue;
	/* The headers for TSO. */
	unsigned char *headers;
};
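/* The "headers" buffer above is sized as one HEADER_BYTES block per
 * equeue entry (see the allocation in tile_net_init_egress() below).
 */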

/* Info for a specific device. */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* The primary link. */
	gxio_mpipe_link_t link;
	/* The primary channel, if open, else -1. */
	int channel;
	/* The "loopify" egress link, if needed. */
	gxio_mpipe_link_t loopify_link;
	/* The "loopify" egress channel, if open, else -1. */
	int loopify_channel;
	/* The egress channel (channel or loopify_channel). */
	int echannel;
	/* mPIPE instance, 0 or 1. */
	int instance;
	/* The timestamp config. */
	struct hwtstamp_config stamp_cfg;
};

static struct mpipe_data {
	/* The ingress irq. */
	int ingress_irq;

	/* The "context" for all devices. */
	gxio_mpipe_context_t context;

	/* Egress info, indexed by "priv->echannel"
	 * (lazily created as needed).
	 */
	struct tile_net_egress
	egress_for_echannel[TILE_NET_CHANNELS];

	/* Devices currently associated with each channel.
	 * NOTE: The array entry can become NULL after ifconfig down, but
	 * we do not free the underlying net_device structures, so it is
	 * safe to use a pointer after reading it from this array.
	 */
	struct net_device
	*tile_net_devs_for_channel[TILE_NET_CHANNELS];

	/* The actual memory allocated for the buffer stacks. */
	void *buffer_stack_vas[MAX_KINDS];

	/* The amount of memory allocated for each buffer stack. */
	size_t buffer_stack_bytes[MAX_KINDS];

	/* The first buffer stack index
	 * (small = +0, large = +1, jumbo = +2).
	 */
	int first_buffer_stack;

	/* The buckets. */
	int first_bucket;
	int num_buckets;

	/* PTP-specific data. */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;

	/* Lock for ptp accessors. */
	struct mutex ptp_lock;

} mpipe_data[NR_MPIPE_MAX] = {
	[0 ... (NR_MPIPE_MAX - 1)] {
		.ingress_irq = -1,
		.first_buffer_stack = -1,
		.first_bucket = -1,
		.num_buckets = 1
	}
};

/* A mutex for "tile_net_devs_for_channel". */
static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);

/* The per-cpu info. */
static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);

/* The buffer size enums for each buffer stack.
 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
 * We avoid the "10384" size because it can induce "false chaining"
 * on "cut-through" jumbo packets.
 */
static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
	GXIO_MPIPE_BUFFER_SIZE_128,
	GXIO_MPIPE_BUFFER_SIZE_1664,
	GXIO_MPIPE_BUFFER_SIZE_16384
};

/* Text value of tile_net.cpus if passed as a module parameter. */
static char *network_cpus_string;

/* The actual cpus in "network_cpus". */
static struct cpumask network_cpus_map;

/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
static char *loopify_link_name;

/* If "tile_net.custom" was specified, this is true. */
static bool custom_flag;

/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
static uint jumbo_num;

/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
static inline int mpipe_instance(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	return priv->instance;
}

/* The "tile_net.cpus" argument specifies the cpus that are dedicated
 * to handle ingress packets.
 *
 * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
 * m, n, x, y are integer numbers that represent the cpus that can be
 * neither a dedicated cpu nor a dataplane cpu.
 */
static bool network_cpus_init(void)
{
	int rc;

	if (network_cpus_string == NULL)
		return false;

	rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
	if (rc != 0) {
		pr_warn("tile_net.cpus=%s: malformed cpu list\n",
			network_cpus_string);
		return false;
	}

	/* Remove dedicated cpus. */
	cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);

	if (cpumask_empty(&network_cpus_map)) {
		pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
			network_cpus_string);
		return false;
	}

	pr_info("Linux network CPUs: %*pbl\n",
		cpumask_pr_args(&network_cpus_map));
	return true;
}

module_param_named(cpus, network_cpus_string, charp, 0444);
MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");

/* The "tile_net.loopify=LINK" argument causes the named device to
 * actually use "loop0" for ingress, and "loop1" for egress.  This
 * allows an app to sit between the actual link and linux, passing
 * (some) packets along to linux, and forwarding (some) packets sent
 * out by linux.
 */
module_param_named(loopify, loopify_link_name, charp, 0444);
MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");

/* The "tile_net.custom" argument causes us to ignore the "conventional"
 * classifier metadata, in particular, the "l2_offset".
 */
module_param_named(custom, custom_flag, bool, 0444);
MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");

/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
 * and to allocate the given number of "jumbo" buffers.
 */
module_param_named(jumbo, jumbo_num, uint, 0444);
MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
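/* Example boot arguments (the values here are illustrative only):
 *
 *   tile_net.cpus=1-7 tile_net.jumbo=512 tile_net.loopify=gbe0
 *
 * This dedicates cpus 1-7 to ingress, provides 512 jumbo buffers, and
 * routes "gbe0" traffic through the loop0/loop1 links.
 */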

/* Atomically update a statistics field.
 * Note that on TILE-Gx, this operation is fire-and-forget on the
 * issuing core (single-cycle dispatch) and takes only a few cycles
 * longer than a regular store when the request reaches the home cache.
 * No expensive bus management overhead is required.
 */
static void tile_net_stats_add(unsigned long value, unsigned long *field)
{
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
	atomic_long_add(value, (atomic_long_t *)field);
}

/* Allocate and push a buffer. */
static bool tile_net_provide_buffer(int instance, int kind)
{
	struct mpipe_data *md = &mpipe_data[instance];
	gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
	size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
	const unsigned long buffer_alignment = 128;
	struct sk_buff *skb;
	int len;

	len = sizeof(struct sk_buff **) + buffer_alignment + bs;
	skb = dev_alloc_skb(len);
	if (skb == NULL)
		return false;

	/* Make room for a back-pointer to 'skb' and guarantee alignment. */
	skb_reserve(skb, sizeof(struct sk_buff **));
	skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));

	/* Save a back-pointer to 'skb'. */
	*(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;

	/* Make sure "skb" and the back-pointer have been flushed. */
	wmb();

	gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
			       (void *)va_to_tile_io_addr(skb->data));

	return true;
}
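/* Resulting buffer layout within the skb's data area:
 *
 *   ... padding ... | struct sk_buff *back_ptr | 128-byte-aligned data ...
 *                                                ^ address pushed to mPIPE
 *
 * mpipe_buf_to_skb() below recovers the skb from the back-pointer.
 */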

/* Convert a raw mpipe buffer to its matching skb pointer. */
static struct sk_buff *mpipe_buf_to_skb(void *va)
{
	/* Acquire the associated "skb". */
	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
	struct sk_buff *skb = *skb_ptr;

	/* Paranoia. */
	if (skb->data != va) {
		/* Panic here since there's a reasonable chance
		 * that corrupt buffers means generic memory
		 * corruption, with unpredictable system effects.
		 */
		panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
		      va, skb, skb->data);
	}

	return skb;
}

static void tile_net_pop_all_buffers(int instance, int stack)
{
	struct mpipe_data *md = &mpipe_data[instance];

	for (;;) {
		tile_io_addr_t addr =
			(tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
							      stack);
		if (addr == 0)
			break;
		dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
	}
}

/* Provide linux buffers to mPIPE. */
static void tile_net_provide_needed_buffers(void)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	int instance, kind;

	for (instance = 0; instance < NR_MPIPE_MAX &&
		     info->mpipe[instance].has_iqueue; instance++) {
		for (kind = 0; kind < MAX_KINDS; kind++) {
			while (info->mpipe[instance].num_needed_buffers[kind]
			       != 0) {
				if (!tile_net_provide_buffer(instance, kind)) {
					pr_notice("Tile %d still needs some buffers\n",
						  info->my_cpu);
					return;
				}
				info->mpipe[instance].
					num_needed_buffers[kind]--;
			}
		}
	}
}

/* Get RX timestamp, and store it in the skb. */
static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
			      gxio_mpipe_idesc_t *idesc)
{
	if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
						  idesc->time_stamp_ns);
	}
}

/* Get TX timestamp, and store it in the skb. */
static void tile_tx_timestamp(struct sk_buff *skb, int instance)
{
	struct skb_shared_info *shtx = skb_shinfo(skb);
	if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
		struct mpipe_data *md = &mpipe_data[instance];
		struct skb_shared_hwtstamps shhwtstamps;
		struct timespec64 ts;

		shtx->tx_flags |= SKBTX_IN_PROGRESS;
		gxio_mpipe_get_timestamp(&md->context, &ts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
		skb_tstamp_tx(skb, &shhwtstamps);
	}
}

/* Use ioctl() to enable or disable TX or RX timestamping. */
static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
{
	struct hwtstamp_config config;
	struct tile_net_priv *priv = netdev_priv(dev);

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)  /* reserved for future extensions */
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	priv->stamp_cfg = config;
	return 0;
}

static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
			 sizeof(priv->stamp_cfg)))
		return -EFAULT;

	return 0;
}

static inline bool filter_packet(struct net_device *dev, void *buf)
{
	/* Filter packets received before we're up. */
	if (dev == NULL || !(dev->flags & IFF_UP))
		return true;

	/* Filter out packets that aren't for us. */
	if (!(dev->flags & IFF_PROMISC) &&
	    !is_multicast_ether_addr(buf) &&
	    !ether_addr_equal(dev->dev_addr, buf))
		return true;

	return false;
}

static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
				 gxio_mpipe_idesc_t *idesc, unsigned long len)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	struct tile_net_priv *priv = netdev_priv(dev);
	int instance = priv->instance;

	/* Encode the actual packet length. */
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	/* Acknowledge "good" hardware checksums. */
	if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* Get RX timestamp from idesc. */
	tile_rx_timestamp(priv, skb, idesc);

	napi_gro_receive(&info->mpipe[instance].napi, skb);

	/* Update stats. */
	tile_net_stats_add(1, &dev->stats.rx_packets);
	tile_net_stats_add(len, &dev->stats.rx_bytes);

	/* Need a new buffer. */
	if (idesc->size == buffer_size_enums[0])
		info->mpipe[instance].num_needed_buffers[0]++;
	else if (idesc->size == buffer_size_enums[1])
		info->mpipe[instance].num_needed_buffers[1]++;
	else
		info->mpipe[instance].num_needed_buffers[2]++;
}

/* Handle a packet.  Return true if "processed", false if "filtered". */
static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	struct mpipe_data *md = &mpipe_data[instance];
	struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
	uint8_t l2_offset;
	void *va;
	void *buf;
	unsigned long len;
	bool filter;

	/* Drop packets for which no buffer was available (which can
	 * happen under heavy load), or for which the me/tr/ce flags
	 * are set (which can happen for jumbo cut-through packets,
	 * or with a customized classifier).
	 */
	if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
		if (dev)
			tile_net_stats_add(1, &dev->stats.rx_errors);
		goto drop;
	}

	/* Get the "l2_offset", if allowed. */
	l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);

	/* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
	va = tile_io_addr_to_va((unsigned long)idesc->va);

	/* Get the actual packet start/length. */
	buf = va + l2_offset;
	len = idesc->l2_size - l2_offset;

	/* Point "va" at the raw buffer. */
	va -= NET_IP_ALIGN;

	filter = filter_packet(dev, buf);
	if (filter) {
		if (dev)
			tile_net_stats_add(1, &dev->stats.rx_dropped);
drop:
		gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
	} else {
		struct sk_buff *skb = mpipe_buf_to_skb(va);

		/* Skip headroom, and any custom header. */
		skb_reserve(skb, NET_IP_ALIGN + l2_offset);

		tile_net_receive_skb(dev, skb, idesc, len);
	}

	gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
	return !filter;
}

/* Handle some packets for the current CPU.
 *
 * This function handles up to TILE_NET_BATCH idescs per call.
 *
 * ISSUE: Since we do not provide new buffers until this function is
 * complete, we must initially provide enough buffers for each network
 * cpu to fill its iqueue and also its batched idescs.
 *
 * ISSUE: The "rotting packet" race condition occurs if a packet
 * arrives after the queue appears to be empty, and before the
 * hypervisor interrupt is re-enabled.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	unsigned int work = 0;
	gxio_mpipe_idesc_t *idesc;
	int instance, i, n;
	struct mpipe_data *md;
	struct info_mpipe *info_mpipe =
		container_of(napi, struct info_mpipe, napi);

	if (budget <= 0)
		goto done;

	instance = info_mpipe->instance;
	while ((n = gxio_mpipe_iqueue_try_peek(
			&info_mpipe->iqueue,
			&idesc)) > 0) {
		for (i = 0; i < n; i++) {
			if (i == TILE_NET_BATCH)
				goto done;
			if (tile_net_handle_packet(instance,
						   idesc + i)) {
				if (++work >= budget)
					goto done;
			}
		}
	}

	/* There are no packets left. */
	napi_complete(&info_mpipe->napi);

	md = &mpipe_data[instance];
	/* Re-enable hypervisor interrupts. */
	gxio_mpipe_enable_notif_ring_interrupt(
		&md->context, info->mpipe[instance].iqueue.ring);

	/* HACK: Avoid the "rotting packet" problem. */
	if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
		napi_schedule(&info_mpipe->napi);

	/* ISSUE: Handle completions? */

done:
	tile_net_provide_needed_buffers();

	return work;
}

/* Handle an ingress interrupt from an instance on the current cpu. */
static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	napi_schedule(&info->mpipe[(uint64_t)id].napi);
	return IRQ_HANDLED;
}

/* Free some completions.  This must be called with interrupts blocked. */
static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
			       struct tile_net_comps *comps,
			       int limit, bool force_update)
{
	int n = 0;
	while (comps->comp_last < comps->comp_next) {
		unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
		struct tile_net_comp *comp = &comps->comp_queue[cid];
		if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
						   force_update || n == 0))
			break;
		dev_kfree_skb_irq(comp->skb);
		comps->comp_last++;
		if (++n == limit)
			break;
	}
	return n;
}

/* Add a completion.  This must be called with interrupts blocked.
 * tile_net_equeue_try_reserve() will have ensured a free completion entry.
 */
static void add_comp(gxio_mpipe_equeue_t *equeue,
		     struct tile_net_comps *comps,
		     uint64_t when, struct sk_buff *skb)
{
	int cid = comps->comp_next % TILE_NET_MAX_COMPS;
	comps->comp_queue[cid].when = when;
	comps->comp_queue[cid].skb = skb;
	comps->comp_next++;
}

static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
					    int tx_queue_idx)
{
	struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
	struct tile_net_priv *priv = netdev_priv(dev);
	int instance = priv->instance;
	struct tile_net_tx_wake *tx_wake =
		&info->mpipe[instance].tx_wake[priv->echannel];

	hrtimer_start(&tx_wake->timer,
		      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
		      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
{
	struct tile_net_tx_wake *tx_wake =
		container_of(t, struct tile_net_tx_wake, timer);
	netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx);
	return HRTIMER_NORESTART;
}

/* Make sure the egress timer is scheduled. */
static void tile_net_schedule_egress_timer(void)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);

	if (!info->egress_timer_scheduled) {
		hrtimer_start(&info->egress_timer,
			      ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
			      HRTIMER_MODE_REL_PINNED);
		info->egress_timer_scheduled = true;
	}
}

/* The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected for this tile.
 */
static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	unsigned long irqflags;
	bool pending = false;
	int i, instance;

	local_irq_save(irqflags);

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	/* Free all possible comps for this tile. */
	for (instance = 0; instance < NR_MPIPE_MAX &&
		     info->mpipe[instance].has_iqueue; instance++) {
		for (i = 0; i < TILE_NET_CHANNELS; i++) {
			struct tile_net_egress *egress =
				&mpipe_data[instance].egress_for_echannel[i];
			struct tile_net_comps *comps =
				info->mpipe[instance].comps_for_echannel[i];
			if (!egress || comps->comp_last >= comps->comp_next)
				continue;
			tile_net_free_comps(egress->equeue, comps, -1, true);
			pending = pending ||
				(comps->comp_last < comps->comp_next);
		}
	}

	/* Reschedule timer if needed. */
	if (pending)
		tile_net_schedule_egress_timer();

	local_irq_restore(irqflags);

	return HRTIMER_NORESTART;
}

/* PTP clock operations. */

static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	int ret = 0;
	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
	mutex_lock(&md->ptp_lock);
	if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
		ret = -EINVAL;
	mutex_unlock(&md->ptp_lock);
	return ret;
}

static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	int ret = 0;
	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
	mutex_lock(&md->ptp_lock);
	if (gxio_mpipe_adjust_timestamp(&md->context, delta))
		ret = -EBUSY;
	mutex_unlock(&md->ptp_lock);
	return ret;
}

static int ptp_mpipe_gettime(struct ptp_clock_info *ptp,
			     struct timespec64 *ts)
{
	int ret = 0;
	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
	mutex_lock(&md->ptp_lock);
	if (gxio_mpipe_get_timestamp(&md->context, ts))
		ret = -EBUSY;
	mutex_unlock(&md->ptp_lock);
	return ret;
}

static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	int ret = 0;
	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
	mutex_lock(&md->ptp_lock);
	if (gxio_mpipe_set_timestamp(&md->context, ts))
		ret = -EBUSY;
	mutex_unlock(&md->ptp_lock);
	return ret;
}

static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
			    struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static struct ptp_clock_info ptp_mpipe_caps = {
	.owner		= THIS_MODULE,
	.name		= "mPIPE clock",
	.max_adj	= 999999999,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= ptp_mpipe_adjfreq,
	.adjtime	= ptp_mpipe_adjtime,
	.gettime64	= ptp_mpipe_gettime,
	.settime64	= ptp_mpipe_settime,
	.enable		= ptp_mpipe_enable,
};

/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
	struct timespec64 ts;

	ktime_get_ts64(&ts);
	gxio_mpipe_set_timestamp(&md->context, &ts);

	mutex_init(&md->ptp_lock);
	md->caps = ptp_mpipe_caps;
	md->ptp_clock = ptp_clock_register(&md->caps, NULL);
	if (IS_ERR(md->ptp_clock))
		netdev_err(dev, "ptp_clock_register failed %ld\n",
			   PTR_ERR(md->ptp_clock));
}

/* Initialize PTP fields in a new device. */
static void init_ptp_dev(struct tile_net_priv *priv)
{
	priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
}

/* Helper functions for "tile_net_update()". */

static void enable_ingress_irq(void *irq)
{
	enable_percpu_irq((long)irq, 0);
}

static void disable_ingress_irq(void *irq)
{
	disable_percpu_irq((long)irq);
}

/* Helper function for tile_net_open() and tile_net_stop().
 * Always called under tile_net_devs_for_channel_mutex.
 */
static int tile_net_update(struct net_device *dev)
{
	static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
	bool saw_channel = false;
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];
	int channel;
	int rc;
	int cpu;

	saw_channel = false;
	gxio_mpipe_rules_init(&rules, &md->context);

	for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
		if (md->tile_net_devs_for_channel[channel] == NULL)
			continue;
		if (!saw_channel) {
			saw_channel = true;
			gxio_mpipe_rules_begin(&rules, md->first_bucket,
					       md->num_buckets, NULL);
			gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
		}
		gxio_mpipe_rules_add_channel(&rules, channel);
	}

	/* NOTE: This can fail if there is no classifier.
	 * ISSUE: Can anything else cause it to fail?
	 */
	rc = gxio_mpipe_rules_commit(&rules);
	if (rc != 0) {
		netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
			    instance, rc);
		return -EIO;
	}

	/* Update all cpus, sequentially (to protect "netif_napi_add()").
	 * We use on_each_cpu to handle the IPI mask or unmask.
	 */
	if (!saw_channel)
		on_each_cpu(disable_ingress_irq,
			    (void *)(long)(md->ingress_irq), 1);
	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);

		if (!info->mpipe[instance].has_iqueue)
			continue;
		if (saw_channel) {
			if (!info->mpipe[instance].napi_added) {
				netif_napi_add(dev, &info->mpipe[instance].napi,
					       tile_net_poll, TILE_NET_WEIGHT);
				info->mpipe[instance].napi_added = true;
			}
			if (!info->mpipe[instance].napi_enabled) {
				napi_enable(&info->mpipe[instance].napi);
				info->mpipe[instance].napi_enabled = true;
			}
		} else {
			if (info->mpipe[instance].napi_enabled) {
				napi_disable(&info->mpipe[instance].napi);
				info->mpipe[instance].napi_enabled = false;
			}
			/* FIXME: Drain the iqueue. */
		}
	}
	if (saw_channel)
		on_each_cpu(enable_ingress_irq,
			    (void *)(long)(md->ingress_irq), 1);

	/* HACK: Allow packets to flow in the simulator. */
	if (saw_channel)
		sim_enable_mpipe_links(instance, -1);

	return 0;
}

/* Initialize a buffer stack. */
static int create_buffer_stack(struct net_device *dev,
			       int kind, size_t num_buffers)
{
	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];
	size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
	int stack_idx = md->first_buffer_stack + kind;
	void *va;
	int i, rc;

	/* Round up to 64KB and then use alloc_pages() so we get the
	 * required 64KB alignment.
	 */
	md->buffer_stack_bytes[kind] =
		ALIGN(needed, 64 * 1024);

	va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
	if (va == NULL) {
		netdev_err(dev,
			   "Could not alloc %zd bytes for buffer stack %d\n",
			   md->buffer_stack_bytes[kind], kind);
		return -ENOMEM;
	}

	/* Initialize the buffer stack. */
	rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
					  buffer_size_enums[kind], va,
					  md->buffer_stack_bytes[kind], 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
			   instance, rc);
		free_pages_exact(va, md->buffer_stack_bytes[kind]);
		return rc;
	}

	md->buffer_stack_vas[kind] = va;

	rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
					       hash_pte, 0);
	if (rc != 0) {
		netdev_err(dev,
			   "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
			   instance, rc);
		return rc;
	}

	/* Provide initial buffers. */
	for (i = 0; i < num_buffers; i++) {
		if (!tile_net_provide_buffer(instance, kind)) {
			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
			return -ENOMEM;
		}
	}

	return 0;
}

/* Allocate and initialize mpipe buffer stacks, and register them in
 * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
 * This routine supports tile_net_init_mpipe(), below.
 */
static int init_buffer_stacks(struct net_device *dev,
			      int network_cpus_count)
{
	int num_kinds = MAX_KINDS - (jumbo_num == 0);
	size_t num_buffers;
	int rc;
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];

	/* Allocate the buffer stacks. */
	rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
	if (rc < 0) {
		netdev_err(dev,
			   "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
			   instance, rc);
		return rc;
	}
	md->first_buffer_stack = rc;

	/* Enough small/large buffers to (normally) avoid buffer errors. */
	num_buffers =
		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);

	/* Allocate the small memory stack. */
	if (rc >= 0)
		rc = create_buffer_stack(dev, 0, num_buffers);

	/* Allocate the large buffer stack. */
	if (rc >= 0)
		rc = create_buffer_stack(dev, 1, num_buffers);

	/* Allocate the jumbo buffer stack if needed. */
	if (rc >= 0 && jumbo_num != 0)
		rc = create_buffer_stack(dev, 2, jumbo_num);

	return rc;
}

/* Allocate per-cpu resources (memory for completions and idescs).
 * This routine supports tile_net_init_mpipe(), below.
 */
static int alloc_percpu_mpipe_resources(struct net_device *dev,
					int cpu, int ring)
{
	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
	int order, i, rc;
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];
	struct page *page;
	void *addr;

	/* Allocate the "comps". */
	order = get_order(COMPS_SIZE);
	page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
	if (page == NULL) {
		netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
			   COMPS_SIZE);
		return -ENOMEM;
	}
	addr = pfn_to_kaddr(page_to_pfn(page));
	memset(addr, 0, COMPS_SIZE);
	for (i = 0; i < TILE_NET_CHANNELS; i++)
		info->mpipe[instance].comps_for_echannel[i] =
			addr + i * sizeof(struct tile_net_comps);

	/* If this is a network cpu, create an iqueue. */
	if (cpumask_test_cpu(cpu, &network_cpus_map)) {
		order = get_order(NOTIF_RING_SIZE);
		page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
		if (page == NULL) {
			netdev_err(dev,
				   "Failed to alloc %zd bytes iqueue memory\n",
				   NOTIF_RING_SIZE);
			return -ENOMEM;
		}
		addr = pfn_to_kaddr(page_to_pfn(page));
		rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
					    &md->context, ring++, addr,
					    NOTIF_RING_SIZE, 0);
		if (rc < 0) {
			netdev_err(dev,
				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
			return rc;
		}
		info->mpipe[instance].has_iqueue = true;
	}

	return ring;
}

/* Initialize NotifGroup and buckets.
 * This routine supports tile_net_init_mpipe(), below.
 */
static int init_notif_group_and_buckets(struct net_device *dev,
					int ring, int network_cpus_count)
{
	int group, rc;
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];

	/* Allocate one NotifGroup. */
	rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
			   instance, rc);
		return rc;
	}
	group = rc;

	/* Initialize global num_buckets value. */
	if (network_cpus_count > 4)
		md->num_buckets = 256;
	else if (network_cpus_count > 1)
		md->num_buckets = 16;

	/* Allocate some buckets, and set global first_bucket value. */
	rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
			   instance, rc);
		return rc;
	}
	md->first_bucket = rc;

	/* Init group and buckets. */
	rc = gxio_mpipe_init_notif_group_and_buckets(
		&md->context, group, ring, network_cpus_count,
		md->first_bucket, md->num_buckets,
		GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
			   "mpipe[%d] %d\n", instance, rc);
		return rc;
	}

	return 0;
}
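/* Note on the bucket counts chosen above: with a single network cpu the
 * default of one bucket (set in the mpipe_data initializer) effectively
 * directs all flows through a single bucket; 16 or 256 buckets let the
 * sticky flow-locality mode above spread distinct flows across the
 * network cpus.
 */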

/* Create an irq and register it, then activate the irq and request
 * interrupts on all cores.  Note that "ingress_irq" being initialized
 * is how we know not to call tile_net_init_mpipe() again.
 * This routine supports tile_net_init_mpipe(), below.
 */
static int tile_net_setup_interrupts(struct net_device *dev)
{
	int cpu, rc, irq;
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];

	irq = md->ingress_irq;
	if (irq < 0) {
		irq = irq_alloc_hwirq(-1);
		if (!irq) {
			netdev_err(dev,
				   "create_irq failed: mpipe[%d] %d\n",
				   instance, irq);
			return irq;
		}
		tile_irq_activate(irq, TILE_IRQ_PERCPU);

		rc = request_irq(irq, tile_net_handle_ingress_irq,
				 0, "tile_net", (void *)((uint64_t)instance));

		if (rc != 0) {
			netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
				   instance, rc);
			irq_free_hwirq(irq);
			return rc;
		}
		md->ingress_irq = irq;
	}

	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		if (info->mpipe[instance].has_iqueue) {
			gxio_mpipe_request_notif_ring_interrupt(&md->context,
				cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
				info->mpipe[instance].iqueue.ring);
		}
	}

	return 0;
}

/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
static void tile_net_init_mpipe_fail(int instance)
{
	int kind, cpu;
	struct mpipe_data *md = &mpipe_data[instance];

	/* Do cleanups that require the mpipe context first. */
	for (kind = 0; kind < MAX_KINDS; kind++) {
		if (md->buffer_stack_vas[kind] != NULL) {
			tile_net_pop_all_buffers(instance,
						 md->first_buffer_stack +
						 kind);
		}
	}

	/* Destroy mpipe context so the hardware no longer owns any memory. */
	gxio_mpipe_destroy(&md->context);

	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		free_pages(
			(unsigned long)(
				info->mpipe[instance].comps_for_echannel[0]),
			get_order(COMPS_SIZE));
		info->mpipe[instance].comps_for_echannel[0] = NULL;
		free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
			   get_order(NOTIF_RING_SIZE));
		info->mpipe[instance].iqueue.idescs = NULL;
	}

	for (kind = 0; kind < MAX_KINDS; kind++) {
		if (md->buffer_stack_vas[kind] != NULL) {
			free_pages_exact(md->buffer_stack_vas[kind],
					 md->buffer_stack_bytes[kind]);
			md->buffer_stack_vas[kind] = NULL;
		}
	}

	md->first_buffer_stack = -1;
	md->first_bucket = -1;
}

/* The first time any tilegx network device is opened, we initialize
 * the global mpipe state.  If this step fails, we fail to open the
 * device, but if it succeeds, we never need to do it again, and since
 * tile_net can't be unloaded, we never undo it.
 *
 * Note that some resources in this path (buffer stack indices,
 * bindings from init_buffer_stack, etc.) are hypervisor resources
 * that are freed implicitly by gxio_mpipe_destroy().
 */
static int tile_net_init_mpipe(struct net_device *dev)
{
	int rc;
	int cpu;
	int first_ring, ring;
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];
	int network_cpus_count = cpumask_weight(&network_cpus_map);

	if (!hash_default) {
		netdev_err(dev, "Networking requires hash_default!\n");
		return -EIO;
	}

	rc = gxio_mpipe_init(&md->context, instance);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
			   instance, rc);
		return -EIO;
	}

	/* Set up the buffer stacks. */
	rc = init_buffer_stacks(dev, network_cpus_count);
	if (rc != 0)
		goto fail;

	/* Allocate one NotifRing for each network cpu. */
	rc = gxio_mpipe_alloc_notif_rings(&md->context,
					  network_cpus_count, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
			   rc);
		goto fail;
	}

	/* Init NotifRings per-cpu. */
	first_ring = rc;
	ring = first_ring;
	for_each_online_cpu(cpu) {
		rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
		if (rc < 0)
			goto fail;
		ring = rc;
	}

	/* Initialize NotifGroup and buckets. */
	rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
	if (rc != 0)
		goto fail;

	/* Create and enable interrupts. */
	rc = tile_net_setup_interrupts(dev);
	if (rc != 0)
		goto fail;

	/* Register PTP clock and set mPIPE timestamp, if configured. */
	register_ptp_clock(dev, md);

	return 0;

fail:
	tile_net_init_mpipe_fail(instance);
	return rc;
}

/* Create persistent egress info for a given egress channel.
 * Note that this may be shared between, say, "gbe0" and "xgbe0".
 * ISSUE: Defer header allocation until TSO is actually needed?
 */
static int tile_net_init_egress(struct net_device *dev, int echannel)
{
	static int ering = -1;
	struct page *headers_page, *edescs_page, *equeue_page;
	gxio_mpipe_edesc_t *edescs;
	gxio_mpipe_equeue_t *equeue;
	unsigned char *headers;
	int headers_order, edescs_order, equeue_order;
	size_t edescs_size;
	int rc = -ENOMEM;
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];

	/* Only initialize once. */
	if (md->egress_for_echannel[echannel].equeue != NULL)
		return 0;

	/* Allocate memory for the "headers". */
	headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
	headers_page = alloc_pages(GFP_KERNEL, headers_order);
	if (headers_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for TSO headers.\n",
			    PAGE_SIZE << headers_order);
		goto fail;
	}
	headers = pfn_to_kaddr(page_to_pfn(headers_page));

	/* Allocate memory for the "edescs". */
	edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
	edescs_order = get_order(edescs_size);
	edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
	if (edescs_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for eDMA ring.\n",
			    edescs_size);
		goto fail_headers;
	}
	edescs = pfn_to_kaddr(page_to_pfn(edescs_page));

	/* Allocate memory for the "equeue". */
	equeue_order = get_order(sizeof(*equeue));
	equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
	if (equeue_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for equeue info.\n",
			    PAGE_SIZE << equeue_order);
		goto fail_edescs;
	}
	equeue = pfn_to_kaddr(page_to_pfn(equeue_page));

	/* Allocate an edma ring (using a one entry "free list"). */
	if (ering < 0) {
		rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
		if (rc < 0) {
			netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
				    "mpipe[%d] %d\n", instance, rc);
			goto fail_equeue;
		}
		ering = rc;
	}

	/* Initialize the equeue. */
	rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
				    edescs, edescs_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
			   instance, rc);
		goto fail_equeue;
	}

	/* Don't reuse the ering later. */
	ering = -1;

	if (jumbo_num != 0) {
		/* Make sure "jumbo" packets can be egressed safely. */
		if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
			/* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
			netdev_warn(dev, "Jumbo packets may not be egressed"
				    " properly on channel %d\n", echannel);
		}
	}

	/* Done. */
	md->egress_for_echannel[echannel].equeue = equeue;
	md->egress_for_echannel[echannel].headers = headers;
	return 0;

fail_equeue:
	__free_pages(equeue_page, equeue_order);

fail_edescs:
	__free_pages(edescs_page, edescs_order);

fail_headers:
	__free_pages(headers_page, headers_order);

fail:
	return rc;
}

/* Return channel number for a newly-opened link. */
static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
			      const char *link_name)
{
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];
	int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
	if (rc < 0) {
		netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
			   link_name, instance, rc);
		return rc;
	}
	if (jumbo_num != 0) {
		u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
		rc = gxio_mpipe_link_set_attr(link, attr, 1);
		if (rc != 0) {
			netdev_err(dev,
				   "Cannot receive jumbo packets on '%s'\n",
				   link_name);
			gxio_mpipe_link_close(link);
			return rc;
		}
	}
	rc = gxio_mpipe_link_channel(link);
	if (rc < 0 || rc >= TILE_NET_CHANNELS) {
		netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
		gxio_mpipe_link_close(link);
		return -EINVAL;
	}
	return rc;
}

/* Help the kernel activate the given network interface. */
static int tile_net_open(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int cpu, rc, instance;

	mutex_lock(&tile_net_devs_for_channel_mutex);

	/* Get the instance info. */
	rc = gxio_mpipe_link_instance(dev->name);
	if (rc < 0 || rc >= NR_MPIPE_MAX) {
		mutex_unlock(&tile_net_devs_for_channel_mutex);
		return -EIO;
	}

	priv->instance = rc;
	instance = rc;
	if (!mpipe_data[rc].context.mmio_fast_base) {
		/* Do one-time initialization per instance the first time
		 * any device is opened.
		 */
		rc = tile_net_init_mpipe(dev);
		if (rc != 0)
			goto fail;
	}

	/* Determine if this is the "loopify" device. */
	if (unlikely((loopify_link_name != NULL) &&
		     !strcmp(dev->name, loopify_link_name))) {
		rc = tile_net_link_open(dev, &priv->link, "loop0");
		if (rc < 0)
			goto fail;
		priv->channel = rc;
		rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
		if (rc < 0)
			goto fail;
		priv->loopify_channel = rc;
		priv->echannel = rc;
	} else {
		rc = tile_net_link_open(dev, &priv->link, dev->name);
		if (rc < 0)
			goto fail;
		priv->channel = rc;
		priv->echannel = rc;
	}

	/* Initialize egress info (if needed).  Once ever, per echannel. */
	rc = tile_net_init_egress(dev, priv->echannel);
	if (rc != 0)
		goto fail;

	mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;

	rc = tile_net_update(dev);
	if (rc != 0)
		goto fail;

	mutex_unlock(&tile_net_devs_for_channel_mutex);

	/* Initialize the transmit wake timer for this device for each cpu. */
	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		struct tile_net_tx_wake *tx_wake =
			&info->mpipe[instance].tx_wake[priv->echannel];

		hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		tx_wake->tx_queue_idx = cpu;
		tx_wake->timer.function = tile_net_handle_tx_wake_timer;
		tx_wake->dev = dev;
	}

	for_each_online_cpu(cpu)
		netif_start_subqueue(dev, cpu);
	netif_carrier_on(dev);
	return 0;

fail :
if ( priv - > loopify_channel > = 0 ) {
if ( gxio_mpipe_link_close ( & priv - > loopify_link ) ! = 0 )
netdev_warn ( dev , " Failed to close loopify link! \n " ) ;
priv - > loopify_channel = - 1 ;
}
if ( priv - > channel > = 0 ) {
if ( gxio_mpipe_link_close ( & priv - > link ) ! = 0 )
netdev_warn ( dev , " Failed to close link! \n " ) ;
priv - > channel = - 1 ;
}
priv - > echannel = - 1 ;
2013-08-01 11:36:42 -04:00
mpipe_data [ instance ] . tile_net_devs_for_channel [ priv - > channel ] = NULL ;
2012-06-07 10:45:02 +00:00
mutex_unlock ( & tile_net_devs_for_channel_mutex ) ;
/* Don't return raw gxio error codes to generic Linux. */
return ( rc > - 512 ) ? rc : - EIO ;
}

/* Help the kernel deactivate the given network interface. */
static int tile_net_stop(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int cpu;
	int instance = priv->instance;
	struct mpipe_data *md = &mpipe_data[instance];

	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		struct tile_net_tx_wake *tx_wake =
			&info->mpipe[instance].tx_wake[priv->echannel];

		hrtimer_cancel(&tx_wake->timer);
		netif_stop_subqueue(dev, cpu);
	}

	mutex_lock(&tile_net_devs_for_channel_mutex);
	md->tile_net_devs_for_channel[priv->channel] = NULL;
	(void)tile_net_update(dev);
	if (priv->loopify_channel >= 0) {
		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
			netdev_warn(dev, "Failed to close loopify link!\n");
		priv->loopify_channel = -1;
	}
	if (priv->channel >= 0) {
		if (gxio_mpipe_link_close(&priv->link) != 0)
			netdev_warn(dev, "Failed to close link!\n");
		priv->channel = -1;
	}
	priv->echannel = -1;
	mutex_unlock(&tile_net_devs_for_channel_mutex);

	return 0;
}

/* Determine the VA for a fragment. */
static inline void *tile_net_frag_buf(skb_frag_t *f)
{
	unsigned long pfn = page_to_pfn(skb_frag_page(f));

	return pfn_to_kaddr(pfn) + f->page_offset;
}

/* Acquire a completion entry and an egress slot, or if we can't,
 * stop the queue and schedule the tx_wake timer.
 */
static s64 tile_net_equeue_try_reserve(struct net_device *dev,
				       int tx_queue_idx,
				       struct tile_net_comps *comps,
				       gxio_mpipe_equeue_t *equeue,
				       int num_edescs)
{
	/* Try to acquire a completion entry. */
	if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
	    tile_net_free_comps(equeue, comps, 32, false) != 0) {

		/* Try to acquire an egress slot. */
		s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;

		/* Freeing some completions gives the equeue time to drain. */
		tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);

		slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;
	}

	/* Still nothing; give up and stop the queue for a short while. */
	netif_stop_subqueue(dev, tx_queue_idx);
	tile_net_schedule_tx_wake_timer(dev, tx_queue_idx);
	return -1;
}
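
/* Illustrative sketch (not part of the driver): the expected caller
 * pattern, as used by the transmit paths below.  Names are those of the
 * surrounding code; error handling is elided.
 *
 *	local_irq_save(irqflags);
 *	slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping,
 *					   comps, equeue, num_edescs);
 *	if (slot < 0) {
 *		local_irq_restore(irqflags);
 *		return NETDEV_TX_BUSY;	// the subqueue is already stopped
 *	}
 *	... post num_edescs edescs starting at "slot" ...
 *	add_comp(equeue, comps, slot + num_edescs - 1, skb);
 *	local_irq_restore(irqflags);
 */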

/* Determine how many edesc's are needed for TSO.
 *
 * Sometimes, if "sendfile()" requires copying, we will be called with
 * "data" containing the header and payload, with "frags" being empty.
 * Sometimes, for example when using NFS over TCP, a single segment can
 * span 3 fragments.  This requires special care.
 */
static int tso_count_edescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;    /* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;   /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {

		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	return num_edescs;
}
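
/* Worked example (illustrative only, for a hypothetical skb): with
 * gso_size 1000, 500 bytes of payload in the linear area, and frags of
 * 1500 and 1000 bytes, the payload totals 3000 bytes and gso_segs is 3.
 * Segment 0 needs a header edesc plus two payload edescs (500 bytes
 * from the linear area, 500 from frag 0); segments 1 and 2 each need a
 * header edesc plus one payload edesc.  tso_count_edescs() thus
 * returns 7.
 */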

/* Prepare modified copies of the skbuff headers. */
static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
				s64 slot)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct iphdr *ih;
	struct ipv6hdr *ih6;
	struct tcphdr *th;
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned char *data = skb->data;
	unsigned int ih_off, th_off, p_len;
	unsigned int isum_seed, tsum_seed, seq;
	unsigned int uninitialized_var(id);
	int is_ipv6;
	long f_id = -1;    /* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;   /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int segment;

	/* Locate original headers and compute various lengths. */
	is_ipv6 = skb_is_gso_v6(skb);
	if (is_ipv6) {
		ih6 = ipv6_hdr(skb);
		ih_off = skb_network_offset(skb);
	} else {
		ih = ip_hdr(skb);
		ih_off = skb_network_offset(skb);
		isum_seed = ((0xFFFF - ih->check) +
			     (0xFFFF - ih->tot_len) +
			     (0xFFFF - ih->id));
		id = ntohs(ih->id);
	}

	th = tcp_hdr(skb);
	th_off = skb_transport_offset(skb);
	p_len = sh->gso_size;

	tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
	seq = ntohl(th->seq);

	/* Prepare all the headers. */
	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned char *buf;
		unsigned int p_used = 0;

		/* Copy to the header memory for this segment. */
		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
			NET_IP_ALIGN;
		memcpy(buf, data, sh_len);

		/* Update copied ip header. */
		if (is_ipv6) {
			ih6 = (struct ipv6hdr *)(buf + ih_off);
			ih6->payload_len = htons(sh_len + p_len - ih_off -
						 sizeof(*ih6));
		} else {
			ih = (struct iphdr *)(buf + ih_off);
			ih->tot_len = htons(sh_len + p_len - ih_off);
			ih->id = htons(id++);
			ih->check = csum_long(isum_seed + ih->tot_len +
					      ih->id) ^ 0xffff;
		}

		/* Update copied tcp header. */
		th = (struct tcphdr *)(buf + th_off);
		th->seq = htonl(seq);
		th->check = csum_long(tsum_seed + htons(sh_len + p_len));
		if (segment != sh->gso_segs - 1) {
			th->fin = 0;
			th->psh = 0;
		}

		/* Skip past the header. */
		slot++;

		/* Skip past the payload. */
		while (p_used < p_len) {

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;

			slot++;
		}

		seq += p_len;

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* Flush the headers so they are ready for hardware DMA. */
	wmb();
}
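
/* Addressing note (illustrative, not part of the driver): each equeue
 * slot owns a fixed HEADER_BYTES-sized region of the preallocated
 * "headers" buffer, offset by NET_IP_ALIGN.  The "slot % EQUEUE_ENTRIES"
 * term makes the mapping wrap, so, for example, slot EQUEUE_ENTRIES + 3
 * writes its header copy into the same region as slot 3.
 */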

/* Pass all the data to mpipe for egress. */
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
		       struct sk_buff *skb, unsigned char *headers, s64 slot)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	int instance = mpipe_instance(dev);
	struct mpipe_data *md = &mpipe_data[instance];
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	gxio_mpipe_edesc_t edesc_head = { { 0 } };
	gxio_mpipe_edesc_t edesc_body = { { 0 } };
	long f_id = -1;    /* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;   /* bytes used from the current fragment */
	void *f_data = skb->data + sh_len;
	long n;            /* size of the current piece of payload */
	unsigned long tx_packets = 0, tx_bytes = 0;
	unsigned int csum_start;
	int segment;

	/* Prepare to egress the headers: set up header edesc. */
	csum_start = skb_checksum_start_offset(skb);
	edesc_head.csum = 1;
	edesc_head.csum_start = csum_start;
	edesc_head.csum_dest = csum_start + skb->csum_offset;
	edesc_head.xfer_size = sh_len;

	/* This is only used to specify the TLB. */
	edesc_head.stack_idx = md->first_buffer_stack;
	edesc_body.stack_idx = md->first_buffer_stack;

	/* Egress all the edescs. */
	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned char *buf;
		unsigned int p_used = 0;

		/* Egress the header. */
		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
			NET_IP_ALIGN;
		edesc_head.va = va_to_tile_io_addr(buf);
		gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
		slot++;

		/* Egress the payload. */
		while (p_used < p_len) {
			void *va;

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_data = tile_net_frag_buf(&sh->frags[f_id]);
				f_used = 0;
			}

			va = f_data + f_used;

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;

			/* Egress a piece of the payload. */
			edesc_body.va = va_to_tile_io_addr(va);
			edesc_body.xfer_size = n;
			edesc_body.bound = !(p_used < p_len);
			gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
			slot++;
		}

		tx_packets++;
		tx_bytes += sh_len + p_len;

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* Update stats. */
	tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
	tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
}
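
/* Layout sketch (illustrative only): for the hypothetical skb used in
 * the tso_count_edescs() example above, the slots starting at "slot"
 * would be filled as
 *
 *	slot + 0: header (segment 0)
 *	slot + 1: 500 bytes from the linear area
 *	slot + 2: 500 bytes from frag 0 (bound = 1)
 *	slot + 3: header (segment 1)
 *	slot + 4: 1000 bytes from frag 0 (bound = 1)
 *	slot + 5: header (segment 2)
 *	slot + 6: 1000 bytes from frag 1 (bound = 1)
 *
 * "bound" marks the last edesc of each packet, as set on edesc_body
 * above.
 */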
/* Do "TSO" handling for egress.
*
* Normally drivers set NETIF_F_TSO only to support hardware TSO ;
* otherwise the stack uses scatter - gather to implement GSO in software .
* On our testing , enabling GSO support ( via NETIF_F_SG ) drops network
* performance down to around 7.5 Gbps on the 10 G interfaces , although
* also dropping cpu utilization way down , to under 8 % . But
* implementing " TSO " in the driver brings performance back up to line
* rate , while dropping cpu usage even further , to less than 4 % . In
* practice , profiling of GSO shows that skb_segment ( ) is what causes
* the performance overheads ; we benefit in the driver from using
* preallocated memory to duplicate the TCP / IP headers .
*/
static int tile_net_tx_tso ( struct sk_buff * skb , struct net_device * dev )
{
2014-08-17 12:30:38 -05:00
struct tile_net_info * info = this_cpu_ptr ( & per_cpu_info ) ;
2012-06-07 10:45:02 +00:00
struct tile_net_priv * priv = netdev_priv ( dev ) ;
int channel = priv - > echannel ;
2013-08-01 11:36:42 -04:00
int instance = priv - > instance ;
struct mpipe_data * md = & mpipe_data [ instance ] ;
struct tile_net_egress * egress = & md - > egress_for_echannel [ channel ] ;
struct tile_net_comps * comps =
info - > mpipe [ instance ] . comps_for_echannel [ channel ] ;
2012-06-07 10:45:02 +00:00
gxio_mpipe_equeue_t * equeue = egress - > equeue ;
unsigned long irqflags ;
int num_edescs ;
s64 slot ;
/* Determine how many mpipe edesc's are needed. */
num_edescs = tso_count_edescs ( skb ) ;
local_irq_save ( irqflags ) ;
/* Try to acquire a completion entry and an egress slot. */
2012-07-01 14:43:47 -04:00
slot = tile_net_equeue_try_reserve ( dev , skb - > queue_mapping , comps ,
equeue , num_edescs ) ;
2012-06-07 10:45:02 +00:00
if ( slot < 0 ) {
local_irq_restore ( irqflags ) ;
return NETDEV_TX_BUSY ;
}
/* Set up copies of header data properly. */
tso_headers_prepare ( skb , egress - > headers , slot ) ;
/* Actually pass the data to the network hardware. */
tso_egress ( dev , equeue , skb , egress - > headers , slot ) ;
/* Add a completion record. */
add_comp ( equeue , comps , slot + num_edescs - 1 , skb ) ;
local_irq_restore ( irqflags ) ;
/* Make sure the egress timer is scheduled. */
tile_net_schedule_egress_timer ( ) ;
return NETDEV_TX_OK ;
}

/* Analyze the body and frags for a transmit request. */
static unsigned int tile_net_tx_frags(struct frag *frags,
				      struct sk_buff *skb,
				      void *b_data, unsigned int b_len)
{
	unsigned int i, n = 0;
	struct skb_shared_info *sh = skb_shinfo(skb);

	if (b_len != 0) {
		frags[n].buf = b_data;
		frags[n++].length = b_len;
	}

	for (i = 0; i < sh->nr_frags; i++) {
		skb_frag_t *f = &sh->frags[i];
		frags[n].buf = tile_net_frag_buf(f);
		frags[n++].length = skb_frag_size(f);
	}

	return n;
}
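
/* Example (illustrative only, for a hypothetical skb): with 100 bytes
 * in the linear area and two page fragments of 1400 and 60 bytes,
 * tile_net_tx_frags() fills frags[0..2] with {skb->data, 100},
 * {frag 0 VA, 1400}, {frag 1 VA, 60} and returns 3.
 */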

/* Help the kernel transmit a packet. */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	struct tile_net_priv *priv = netdev_priv(dev);
	int instance = priv->instance;
	struct mpipe_data *md = &mpipe_data[instance];
	struct tile_net_egress *egress =
		&md->egress_for_echannel[priv->echannel];
	gxio_mpipe_equeue_t *equeue = egress->equeue;
	struct tile_net_comps *comps =
		info->mpipe[instance].comps_for_echannel[priv->echannel];
	unsigned int len = skb->len;
	unsigned char *data = skb->data;
	unsigned int num_edescs;
	struct frag frags[MAX_FRAGS];
	gxio_mpipe_edesc_t edescs[MAX_FRAGS];
	unsigned long irqflags;
	gxio_mpipe_edesc_t edesc = { { 0 } };
	unsigned int i;
	s64 slot;

	if (skb_is_gso(skb))
		return tile_net_tx_tso(skb, dev);

	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));

	/* This is only used to specify the TLB. */
	edesc.stack_idx = md->first_buffer_stack;

	/* Prepare the edescs. */
	for (i = 0; i < num_edescs; i++) {
		edesc.xfer_size = frags[i].length;
		edesc.va = va_to_tile_io_addr(frags[i].buf);
		edescs[i] = edesc;
	}

	/* Mark the final edesc. */
	edescs[num_edescs - 1].bound = 1;

	/* Add checksum info to the initial edesc, if needed. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start = skb_checksum_start_offset(skb);
		edescs[0].csum = 1;
		edescs[0].csum_start = csum_start;
		edescs[0].csum_dest = csum_start + skb->csum_offset;
	}

	local_irq_save(irqflags);

	/* Try to acquire a completion entry and an egress slot. */
	slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
					   equeue, num_edescs);
	if (slot < 0) {
		local_irq_restore(irqflags);
		return NETDEV_TX_BUSY;
	}

	for (i = 0; i < num_edescs; i++)
		gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);

	/* Store TX timestamp if needed. */
	tile_tx_timestamp(skb, instance);

	/* Add a completion record. */
	add_comp(equeue, comps, slot - 1, skb);

	/* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
	tile_net_stats_add(1, &dev->stats.tx_packets);
	tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
			   &dev->stats.tx_bytes);

	local_irq_restore(irqflags);

	/* Make sure the egress timer is scheduled. */
	tile_net_schedule_egress_timer();

	return NETDEV_TX_OK;
}

/* Return subqueue id on this core (one per core). */
static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
				 void *accel_priv,
				 select_queue_fallback_t fallback)
{
	return smp_processor_id();
}
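
/* Descriptive note: the device is allocated with one TX subqueue per
 * cpu (see the alloc_netdev_mqs() call below), and the subqueue index
 * is simply the cpu number, which is why tile_net_open() and
 * tile_net_tx_timeout() walk for_each_online_cpu() to start, stop, or
 * wake the subqueues.
 */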

/* Deal with a transmit timeout. */
static void tile_net_tx_timeout(struct net_device *dev)
{
	int cpu;

	for_each_online_cpu(cpu)
		netif_wake_subqueue(dev, cpu);
}

/* Ioctl commands. */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (cmd == SIOCSHWTSTAMP)
		return tile_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return tile_hwtstamp_get(dev, rq);

	return -EOPNOTSUPP;
}

/* Change the MTU. */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68)
		return -EINVAL;
	if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
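
/* In other words, given the checks above, the accepted MTU range is
 * 68 through 1500 bytes by default, or 68 through 9000 bytes when the
 * "jumbo_num" module parameter is nonzero.
 */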

/* Change the Ethernet address of the NIC.
 *
 * The hypervisor driver does not support changing MAC address.  However,
 * the hardware does not do anything with the MAC address, so the address
 * which gets used on outgoing packets, and which is accepted on incoming
 * packets, is completely up to us.
 *
 * Returns 0 on success, negative on failure.
 */
static int tile_net_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void tile_net_netpoll(struct net_device *dev)
{
	int instance = mpipe_instance(dev);
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	struct mpipe_data *md = &mpipe_data[instance];

	disable_percpu_irq(md->ingress_irq);
	napi_schedule(&info->mpipe[instance].napi);
	enable_percpu_irq(md->ingress_irq, 0);
}
#endif

static const struct net_device_ops tile_net_ops = {
	.ndo_open = tile_net_open,
	.ndo_stop = tile_net_stop,
	.ndo_start_xmit = tile_net_tx,
	.ndo_select_queue = tile_net_select_queue,
	.ndo_do_ioctl = tile_net_ioctl,
	.ndo_change_mtu = tile_net_change_mtu,
	.ndo_tx_timeout = tile_net_tx_timeout,
	.ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tile_net_netpoll,
#endif
};

/* The setup function.
 *
 * This uses ether_setup() to assign various fields in dev, including
 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
 */
static void tile_net_setup(struct net_device *dev)
{
	netdev_features_t features = 0;

	ether_setup(dev);
	dev->netdev_ops = &tile_net_ops;
	dev->watchdog_timeo = TILE_NET_TIMEOUT;
	dev->mtu = 1500;

	features |= NETIF_F_HW_CSUM;
	features |= NETIF_F_SG;
	features |= NETIF_F_TSO;
	features |= NETIF_F_TSO6;

	dev->hw_features |= features;
	dev->vlan_features |= features;
	dev->features |= features;
}

/* Allocate the device structure, register the device, and obtain the
 * MAC address from the hypervisor.
 */
static void tile_net_dev_init(const char *name, const uint8_t *mac)
{
	int ret;
	struct net_device *dev;
	struct tile_net_priv *priv;

	/* HACK: Ignore "loop" links. */
	if (strncmp(name, "loop", 4) == 0)
		return;

	/* Allocate the device structure.  Normally, "name" is a
	 * template, instantiated by register_netdev(), but not for us.
	 */
	dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN,
			       tile_net_setup, NR_CPUS, 1);
	if (!dev) {
		pr_err("alloc_netdev_mqs(%s) failed\n", name);
		return;
	}

	/* Initialize "priv". */
	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->channel = -1;
	priv->loopify_channel = -1;
	priv->echannel = -1;
	init_ptp_dev(priv);

	/* Get the MAC address and set it in the device struct; this must
	 * be done before the device is opened.  If the MAC is all zeroes,
	 * we use a random address, since we're probably on the simulator.
	 */
	if (!is_zero_ether_addr(mac))
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/* Register the network device. */
	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "register_netdev failed %d\n", ret);
		free_netdev(dev);
		return;
	}
}

/* Per-cpu module initialization. */
static void tile_net_init_module_percpu(void *unused)
{
	struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
	int my_cpu = smp_processor_id();
	int instance;

	for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
		info->mpipe[instance].has_iqueue = false;
		info->mpipe[instance].instance = instance;
	}
	info->my_cpu = my_cpu;

	/* Initialize the egress timer. */
	hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	info->egress_timer.function = tile_net_handle_egress_timer;
}

/* Module initialization. */
static int __init tile_net_init_module(void)
{
	int i;
	char name[GXIO_MPIPE_LINK_NAME_LEN];
	uint8_t mac[6];

	pr_info("Tilera Network Driver\n");

	BUILD_BUG_ON(NR_MPIPE_MAX != 2);

	mutex_init(&tile_net_devs_for_channel_mutex);

	/* Initialize each CPU. */
	on_each_cpu(tile_net_init_module_percpu, NULL, 1);

	/* Find out what devices we have, and initialize them. */
	for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
		tile_net_dev_init(name, mac);

	if (!network_cpus_init())
		cpumask_and(&network_cpus_map, housekeeping_cpumask(),
			    cpu_online_mask);

	return 0;
}

module_init(tile_net_init_module);