/*
 * drivers/net/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
 * by reference.
 */
2011-06-14 08:57:47 +00:00
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2005-04-16 15:20:36 -07:00
# include <linux/kernel.h>
# include <linux/string.h>
# include <linux/errno.h>
# include <linux/interrupt.h>
# include <linux/init.h>
# include <linux/delay.h>
# include <linux/netdevice.h>
# include <linux/etherdevice.h>
# include <linux/skbuff.h>
# include <linux/spinlock.h>
# include <linux/mm.h>
# include <asm/io.h>
# include <asm/irq.h>
# include <asm/uaccess.h>
# include <linux/module.h>
# include <linux/crc32.h>
# include <asm/types.h>
# include <linux/ethtool.h>
2005-09-23 22:54:21 -04:00
# include <linux/mii.h>
# include <linux/phy.h>
2011-06-20 13:57:59 -07:00
# include <linux/sort.h>
2011-07-07 04:30:29 -07:00
# include <linux/if_vlan.h>
2005-04-16 15:20:36 -07:00
# include "gianfar.h"
2005-06-20 10:54:21 -05:00
extern void gfar_start ( struct net_device * dev ) ;
2009-11-02 07:03:00 +00:00
extern int gfar_clean_rx_ring ( struct gfar_priv_rx_q * rx_queue , int rx_work_limit ) ;
2005-04-16 15:20:36 -07:00
2005-09-23 22:54:21 -04:00
# define GFAR_MAX_COAL_USECS 0xffff
# define GFAR_MAX_COAL_FRAMES 0xff
2005-06-20 10:54:21 -05:00
static void gfar_fill_stats ( struct net_device * dev , struct ethtool_stats * dummy ,
2005-04-16 15:20:36 -07:00
u64 * buf ) ;
2005-06-20 10:54:21 -05:00
static void gfar_gstrings ( struct net_device * dev , u32 stringset , u8 * buf ) ;
static int gfar_gcoalesce ( struct net_device * dev , struct ethtool_coalesce * cvals ) ;
static int gfar_scoalesce ( struct net_device * dev , struct ethtool_coalesce * cvals ) ;
static void gfar_gringparam ( struct net_device * dev , struct ethtool_ringparam * rvals ) ;
static int gfar_sringparam ( struct net_device * dev , struct ethtool_ringparam * rvals ) ;
static void gfar_gdrvinfo ( struct net_device * dev , struct ethtool_drvinfo * drvinfo ) ;
2005-04-16 15:20:36 -07:00
/* Statistic names reported through "ethtool -S", one entry per u64
 * counter and each at most ETH_GSTRING_LEN bytes.
 *
 * NOTE(review): gfar_gstrings() copies either the whole table or only the
 * first GFAR_EXTRA_STATS_LEN entries, so this list is assumed to start
 * with the software "extra" statistics followed by the hardware RMON
 * counters, matching the layout used by gfar_fill_stats() — confirm
 * against struct gfar_stats in gianfar.h. */
static char stat_gstrings[][ETH_GSTRING_LEN] = {
	"rx-dropped-by-kernel",
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"rx-skb-missing-errors",
	"tx-timeout-errors",
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};
2005-06-20 10:54:21 -05:00
/* ethtool get_strings: copy the statistic names into @buf.  Devices with
 * an RMON block export the full table; others only export the software
 * "extra" counters at the start of stat_gstrings. */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct gfar_private *priv = netdev_priv(dev);
	size_t len;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		len = GFAR_STATS_LEN * ETH_GSTRING_LEN;
	else
		len = GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN;

	memcpy(buf, stat_gstrings, len);
}
2005-04-16 15:20:36 -07:00
/* Fill in an array of 64-bit statistics from various sources.
* This array will be appended to the end of the ethtool_stats
* structure , and returned to user space
*/
2005-06-20 10:54:21 -05:00
static void gfar_fill_stats ( struct net_device * dev , struct ethtool_stats * dummy , u64 * buf )
2005-04-16 15:20:36 -07:00
{
int i ;
struct gfar_private * priv = netdev_priv ( dev ) ;
2009-11-02 07:03:34 +00:00
struct gfar __iomem * regs = priv - > gfargrp [ 0 ] . regs ;
2005-04-16 15:20:36 -07:00
u64 * extra = ( u64 * ) & priv - > extra_stats ;
2008-12-16 15:29:15 -08:00
if ( priv - > device_flags & FSL_GIANFAR_DEV_HAS_RMON ) {
2009-11-02 07:03:09 +00:00
u32 __iomem * rmon = ( u32 __iomem * ) & regs - > rmon ;
2005-06-20 10:54:21 -05:00
struct gfar_stats * stats = ( struct gfar_stats * ) buf ;
2005-04-16 15:20:36 -07:00
2005-06-20 10:54:21 -05:00
for ( i = 0 ; i < GFAR_RMON_LEN ; i + + )
2006-02-01 15:18:03 -06:00
stats - > rmon [ i ] = ( u64 ) gfar_read ( & rmon [ i ] ) ;
2005-04-16 15:20:36 -07:00
2005-06-20 10:54:21 -05:00
for ( i = 0 ; i < GFAR_EXTRA_STATS_LEN ; i + + )
stats - > extra [ i ] = extra [ i ] ;
} else
for ( i = 0 ; i < GFAR_EXTRA_STATS_LEN ; i + + )
buf [ i ] = extra [ i ] ;
2005-04-16 15:20:36 -07:00
}
2007-10-03 18:07:32 -07:00
static int gfar_sset_count ( struct net_device * dev , int sset )
2005-04-16 15:20:36 -07:00
{
struct gfar_private * priv = netdev_priv ( dev ) ;
2007-10-03 18:07:32 -07:00
switch ( sset ) {
case ETH_SS_STATS :
2008-12-16 15:29:15 -08:00
if ( priv - > device_flags & FSL_GIANFAR_DEV_HAS_RMON )
2007-10-03 18:07:32 -07:00
return GFAR_STATS_LEN ;
else
return GFAR_EXTRA_STATS_LEN ;
default :
return - EOPNOTSUPP ;
}
2005-04-16 15:20:36 -07:00
}
/* Fills in the drvinfo structure with some basic info */
2005-06-20 10:54:21 -05:00
static void gfar_gdrvinfo ( struct net_device * dev , struct
2005-04-16 15:20:36 -07:00
ethtool_drvinfo * drvinfo )
{
strncpy ( drvinfo - > driver , DRV_NAME , GFAR_INFOSTR_LEN ) ;
strncpy ( drvinfo - > version , gfar_driver_version , GFAR_INFOSTR_LEN ) ;
strncpy ( drvinfo - > fw_version , " N/A " , GFAR_INFOSTR_LEN ) ;
strncpy ( drvinfo - > bus_info , " N/A " , GFAR_INFOSTR_LEN ) ;
drvinfo - > regdump_len = 0 ;
drvinfo - > eedump_len = 0 ;
}
2005-09-23 22:54:21 -04:00
static int gfar_ssettings ( struct net_device * dev , struct ethtool_cmd * cmd )
{
struct gfar_private * priv = netdev_priv ( dev ) ;
struct phy_device * phydev = priv - > phydev ;
if ( NULL = = phydev )
return - ENODEV ;
return phy_ethtool_sset ( phydev , cmd ) ;
}
2005-04-16 15:20:36 -07:00
/* Return the current settings in the ethtool_cmd structure */
2005-06-20 10:54:21 -05:00
static int gfar_gsettings ( struct net_device * dev , struct ethtool_cmd * cmd )
2005-04-16 15:20:36 -07:00
{
struct gfar_private * priv = netdev_priv ( dev ) ;
2005-09-23 22:54:21 -04:00
struct phy_device * phydev = priv - > phydev ;
2009-11-02 07:03:00 +00:00
struct gfar_priv_rx_q * rx_queue = NULL ;
struct gfar_priv_tx_q * tx_queue = NULL ;
2005-09-23 22:54:21 -04:00
if ( NULL = = phydev )
return - ENODEV ;
2009-11-02 07:03:15 +00:00
tx_queue = priv - > tx_queue [ 0 ] ;
rx_queue = priv - > rx_queue [ 0 ] ;
2006-09-13 13:24:59 -04:00
2009-11-02 07:03:15 +00:00
/* etsec-1.7 and older versions have only one txic
* and rxic regs although they support multiple queues */
2009-11-02 07:03:00 +00:00
cmd - > maxtxpkt = get_icft_value ( tx_queue - > txic ) ;
cmd - > maxrxpkt = get_icft_value ( rx_queue - > rxic ) ;
2005-04-16 15:20:36 -07:00
2005-09-23 22:54:21 -04:00
return phy_ethtool_gset ( phydev , cmd ) ;
2005-04-16 15:20:36 -07:00
}
/* Return the length of the register structure */
/* ethtool get_regs_len: the register dump in gfar_get_regs() covers the
 * whole memory-mapped struct gfar block, so report its size. */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof(struct gfar);
}
/* Return a dump of the GFAR register space */
2005-06-20 10:54:21 -05:00
static void gfar_get_regs ( struct net_device * dev , struct ethtool_regs * regs , void * regbuf )
2005-04-16 15:20:36 -07:00
{
int i ;
struct gfar_private * priv = netdev_priv ( dev ) ;
2009-11-02 07:03:34 +00:00
u32 __iomem * theregs = ( u32 __iomem * ) priv - > gfargrp [ 0 ] . regs ;
2005-04-16 15:20:36 -07:00
u32 * buf = ( u32 * ) regbuf ;
for ( i = 0 ; i < sizeof ( struct gfar ) / sizeof ( u32 ) ; i + + )
2006-02-01 15:18:03 -06:00
buf [ i ] = gfar_read ( & theregs [ i ] ) ;
2005-04-16 15:20:36 -07:00
}
/* Convert microseconds to ethernet clock ticks, which changes
* depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks ( struct gfar_private * priv , unsigned int usecs )
{
unsigned int count ;
/* The timer is different, depending on the interface speed */
2005-09-23 22:54:21 -04:00
switch ( priv - > phydev - > speed ) {
case SPEED_1000 :
2005-04-16 15:20:36 -07:00
count = GFAR_GBIT_TIME ;
break ;
2005-09-23 22:54:21 -04:00
case SPEED_100 :
2005-04-16 15:20:36 -07:00
count = GFAR_100_TIME ;
break ;
2005-09-23 22:54:21 -04:00
case SPEED_10 :
2005-04-16 15:20:36 -07:00
default :
count = GFAR_10_TIME ;
break ;
}
/* Make sure we return a number greater than 0
* if usecs > 0 */
2010-09-23 05:40:09 +00:00
return ( usecs * 1000 + count - 1 ) / count ;
2005-04-16 15:20:36 -07:00
}
/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs ( struct gfar_private * priv , unsigned int ticks )
{
unsigned int count ;
/* The timer is different, depending on the interface speed */
2005-09-23 22:54:21 -04:00
switch ( priv - > phydev - > speed ) {
case SPEED_1000 :
2005-04-16 15:20:36 -07:00
count = GFAR_GBIT_TIME ;
break ;
2005-09-23 22:54:21 -04:00
case SPEED_100 :
2005-04-16 15:20:36 -07:00
count = GFAR_100_TIME ;
break ;
2005-09-23 22:54:21 -04:00
case SPEED_10 :
2005-04-16 15:20:36 -07:00
default :
count = GFAR_10_TIME ;
break ;
}
/* Make sure we return a number greater than 0 */
/* if ticks is > 0 */
2010-09-23 05:40:09 +00:00
return ( ticks * count ) / 1000 ;
2005-04-16 15:20:36 -07:00
}
/* Get the coalescing parameters, and put them in the cvals
* structure . */
2005-06-20 10:54:21 -05:00
static int gfar_gcoalesce ( struct net_device * dev , struct ethtool_coalesce * cvals )
2005-04-16 15:20:36 -07:00
{
struct gfar_private * priv = netdev_priv ( dev ) ;
2009-11-02 07:03:00 +00:00
struct gfar_priv_rx_q * rx_queue = NULL ;
struct gfar_priv_tx_q * tx_queue = NULL ;
2008-12-16 15:29:52 -08:00
unsigned long rxtime ;
unsigned long rxcount ;
unsigned long txtime ;
unsigned long txcount ;
2006-09-13 13:24:59 -04:00
2008-12-16 15:29:15 -08:00
if ( ! ( priv - > device_flags & FSL_GIANFAR_DEV_HAS_COALESCE ) )
2005-06-20 10:54:21 -05:00
return - EOPNOTSUPP ;
2005-04-16 15:20:36 -07:00
2005-09-23 22:54:21 -04:00
if ( NULL = = priv - > phydev )
return - ENODEV ;
2009-11-02 07:03:15 +00:00
rx_queue = priv - > rx_queue [ 0 ] ;
tx_queue = priv - > tx_queue [ 0 ] ;
2009-11-02 07:03:00 +00:00
rxtime = get_ictt_value ( rx_queue - > rxic ) ;
rxcount = get_icft_value ( rx_queue - > rxic ) ;
txtime = get_ictt_value ( tx_queue - > txic ) ;
txcount = get_icft_value ( tx_queue - > txic ) ;
2008-12-16 15:29:52 -08:00
cvals - > rx_coalesce_usecs = gfar_ticks2usecs ( priv , rxtime ) ;
cvals - > rx_max_coalesced_frames = rxcount ;
2005-04-16 15:20:36 -07:00
2008-12-16 15:29:52 -08:00
cvals - > tx_coalesce_usecs = gfar_ticks2usecs ( priv , txtime ) ;
cvals - > tx_max_coalesced_frames = txcount ;
2005-04-16 15:20:36 -07:00
cvals - > use_adaptive_rx_coalesce = 0 ;
cvals - > use_adaptive_tx_coalesce = 0 ;
cvals - > pkt_rate_low = 0 ;
cvals - > rx_coalesce_usecs_low = 0 ;
cvals - > rx_max_coalesced_frames_low = 0 ;
cvals - > tx_coalesce_usecs_low = 0 ;
cvals - > tx_max_coalesced_frames_low = 0 ;
/* When the packet rate is below pkt_rate_high but above
* pkt_rate_low ( both measured in packets per second ) the
* normal { rx , tx } _ * coalescing parameters are used .
*/
/* When the packet rate is (measured in packets per second)
* is above pkt_rate_high , the { rx , tx } _ * _high parameters are
* used .
*/
cvals - > pkt_rate_high = 0 ;
cvals - > rx_coalesce_usecs_high = 0 ;
cvals - > rx_max_coalesced_frames_high = 0 ;
cvals - > tx_coalesce_usecs_high = 0 ;
cvals - > tx_max_coalesced_frames_high = 0 ;
/* How often to do adaptive coalescing packet rate sampling,
* measured in seconds . Must not be zero .
*/
cvals - > rate_sample_interval = 0 ;
return 0 ;
}
/* Change the coalescing values.
* Both cvals - > * _usecs and cvals - > * _frames have to be > 0
* in order for coalescing to be active
*/
2005-06-20 10:54:21 -05:00
static int gfar_scoalesce ( struct net_device * dev , struct ethtool_coalesce * cvals )
2005-04-16 15:20:36 -07:00
{
struct gfar_private * priv = netdev_priv ( dev ) ;
2009-11-02 07:03:34 +00:00
int i = 0 ;
2005-04-16 15:20:36 -07:00
2008-12-16 15:29:15 -08:00
if ( ! ( priv - > device_flags & FSL_GIANFAR_DEV_HAS_COALESCE ) )
2005-06-20 10:54:21 -05:00
return - EOPNOTSUPP ;
2005-04-16 15:20:36 -07:00
/* Set up rx coalescing */
2009-11-02 07:03:34 +00:00
/* As of now, we will enable/disable coalescing for all
* queues together in case of eTSEC2 , this will be modified
* along with the ethtool interface */
2005-04-16 15:20:36 -07:00
if ( ( cvals - > rx_coalesce_usecs = = 0 ) | |
2009-11-02 07:03:34 +00:00
( cvals - > rx_max_coalesced_frames = = 0 ) ) {
for ( i = 0 ; i < priv - > num_rx_queues ; i + + )
priv - > rx_queue [ i ] - > rxcoalescing = 0 ;
} else {
for ( i = 0 ; i < priv - > num_rx_queues ; i + + )
priv - > rx_queue [ i ] - > rxcoalescing = 1 ;
}
2005-04-16 15:20:36 -07:00
2005-09-23 22:54:21 -04:00
if ( NULL = = priv - > phydev )
return - ENODEV ;
/* Check the bounds of the values */
if ( cvals - > rx_coalesce_usecs > GFAR_MAX_COAL_USECS ) {
pr_info ( " Coalescing is limited to %d microseconds \n " ,
2011-06-14 08:57:47 +00:00
GFAR_MAX_COAL_USECS ) ;
2005-09-23 22:54:21 -04:00
return - EINVAL ;
}
if ( cvals - > rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES ) {
pr_info ( " Coalescing is limited to %d frames \n " ,
2011-06-14 08:57:47 +00:00
GFAR_MAX_COAL_FRAMES ) ;
2005-09-23 22:54:21 -04:00
return - EINVAL ;
}
2009-11-02 07:03:34 +00:00
for ( i = 0 ; i < priv - > num_rx_queues ; i + + ) {
priv - > rx_queue [ i ] - > rxic = mk_ic_value (
cvals - > rx_max_coalesced_frames ,
gfar_usecs2ticks ( priv , cvals - > rx_coalesce_usecs ) ) ;
}
2005-04-16 15:20:36 -07:00
/* Set up tx coalescing */
if ( ( cvals - > tx_coalesce_usecs = = 0 ) | |
2009-11-02 07:03:34 +00:00
( cvals - > tx_max_coalesced_frames = = 0 ) ) {
for ( i = 0 ; i < priv - > num_tx_queues ; i + + )
priv - > tx_queue [ i ] - > txcoalescing = 0 ;
} else {
for ( i = 0 ; i < priv - > num_tx_queues ; i + + )
priv - > tx_queue [ i ] - > txcoalescing = 1 ;
}
2005-04-16 15:20:36 -07:00
2005-09-23 22:54:21 -04:00
/* Check the bounds of the values */
if ( cvals - > tx_coalesce_usecs > GFAR_MAX_COAL_USECS ) {
pr_info ( " Coalescing is limited to %d microseconds \n " ,
2011-06-14 08:57:47 +00:00
GFAR_MAX_COAL_USECS ) ;
2005-09-23 22:54:21 -04:00
return - EINVAL ;
}
if ( cvals - > tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES ) {
pr_info ( " Coalescing is limited to %d frames \n " ,
2011-06-14 08:57:47 +00:00
GFAR_MAX_COAL_FRAMES ) ;
2005-09-23 22:54:21 -04:00
return - EINVAL ;
}
2009-11-02 07:03:34 +00:00
for ( i = 0 ; i < priv - > num_tx_queues ; i + + ) {
priv - > tx_queue [ i ] - > txic = mk_ic_value (
cvals - > tx_max_coalesced_frames ,
gfar_usecs2ticks ( priv , cvals - > tx_coalesce_usecs ) ) ;
}
2005-04-16 15:20:36 -07:00
2009-11-02 07:03:34 +00:00
gfar_configure_coalescing ( priv , 0xFF , 0xFF ) ;
2005-04-16 15:20:36 -07:00
return 0 ;
}
/* Fills in rvals with the current ring parameters. Currently,
* rx , rx_mini , and rx_jumbo rings are the same size , as mini and
* jumbo are ignored by the driver */
2005-06-20 10:54:21 -05:00
static void gfar_gringparam ( struct net_device * dev , struct ethtool_ringparam * rvals )
2005-04-16 15:20:36 -07:00
{
struct gfar_private * priv = netdev_priv ( dev ) ;
2009-11-02 07:03:00 +00:00
struct gfar_priv_tx_q * tx_queue = NULL ;
struct gfar_priv_rx_q * rx_queue = NULL ;
2009-11-02 07:03:15 +00:00
tx_queue = priv - > tx_queue [ 0 ] ;
rx_queue = priv - > rx_queue [ 0 ] ;
2005-04-16 15:20:36 -07:00
rvals - > rx_max_pending = GFAR_RX_MAX_RING_SIZE ;
rvals - > rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE ;
rvals - > rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE ;
rvals - > tx_max_pending = GFAR_TX_MAX_RING_SIZE ;
/* Values changeable by the user. The valid values are
* in the range 1 to the " *_max_pending " counterpart above .
*/
2009-11-02 07:03:00 +00:00
rvals - > rx_pending = rx_queue - > rx_ring_size ;
rvals - > rx_mini_pending = rx_queue - > rx_ring_size ;
rvals - > rx_jumbo_pending = rx_queue - > rx_ring_size ;
rvals - > tx_pending = tx_queue - > tx_ring_size ;
2005-04-16 15:20:36 -07:00
}
/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion.  We wait for the ring to be clean before reallocating
 * the rings. */
/* ethtool set_ringparam.
 * Validates that both requested sizes are power-of-two and within the
 * hardware maximum, then — if the interface is up — halts the
 * controller under the queue locks, drains the rx rings, tears the
 * rings down, applies the new sizes to every queue, and rebuilds. */
static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i = 0;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (dev->flags & IFF_UP) {
		unsigned long flags;

		/* Halt TX and RX, and process the frames which
		 * have already been received */
		/* IRQs disabled around the queue locks; tx is taken
		 * before rx */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		/* Drain frames already received before the rings go away */
		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	/* Change the size */
	/* NOTE(review): the loop bound is num_rx_queues but it also
	 * resizes tx_queue[i] — assumes equal rx/tx queue counts;
	 * confirm against the probe code. */
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
		priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
	}

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
2005-04-16 15:20:36 -07:00
2011-04-15 04:50:50 +00:00
/* ndo_set_features handler (non-static: referenced from gianfar.c).
 * Applies VLAN offload changes immediately; an RXCSUM change requires a
 * full halt/teardown/restart of the controller when the interface is
 * up, since checksum offload is configured at ring setup time. */
int gfar_set_features(struct net_device *dev, u32 features)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0, i = 0;
	/* XOR exposes exactly the feature bits that are changing */
	u32 changed = dev->features ^ features;

	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
		gfar_vlan_mode(dev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (dev->flags & IFF_UP) {
		/* Halt TX and RX, and process the frames which
		 * have already been received */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		/* NOTE(review): unlock order here is tx-then-rx, the
		 * reverse of gfar_sringparam() — both release all locks
		 * before re-enabling IRQs, so presumably harmless, but
		 * worth confirming/unifying. */
		unlock_tx_qs(priv);
		unlock_rx_qs(priv);
		local_irq_restore(flags);

		/* Drain frames already received before the rings go away */
		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);

		/* Must be visible before startup_gfar() programs the
		 * rings with the new offload settings */
		dev->features = features;

		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
2005-06-20 10:54:21 -05:00
static uint32_t gfar_get_msglevel ( struct net_device * dev )
2006-09-13 13:24:59 -04:00
{
2005-06-20 10:54:21 -05:00
struct gfar_private * priv = netdev_priv ( dev ) ;
return priv - > msg_enable ;
2006-09-13 13:24:59 -04:00
}
2005-06-20 10:54:21 -05:00
/* ethtool set_msglevel: replace the driver's message-enable bitmap. */
static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *gp = netdev_priv(dev);

	gp->msg_enable = data;
}
2008-07-11 18:04:45 -05:00
# ifdef CONFIG_PM
static void gfar_get_wol ( struct net_device * dev , struct ethtool_wolinfo * wol )
{
struct gfar_private * priv = netdev_priv ( dev ) ;
2008-12-16 15:29:15 -08:00
if ( priv - > device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET ) {
2008-07-11 18:04:45 -05:00
wol - > supported = WAKE_MAGIC ;
wol - > wolopts = priv - > wol_en ? WAKE_MAGIC : 0 ;
} else {
wol - > supported = wol - > wolopts = 0 ;
}
}
static int gfar_set_wol ( struct net_device * dev , struct ethtool_wolinfo * wol )
{
struct gfar_private * priv = netdev_priv ( dev ) ;
unsigned long flags ;
2008-12-16 15:29:15 -08:00
if ( ! ( priv - > device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET ) & &
2008-07-11 18:04:45 -05:00
wol - > wolopts ! = 0 )
return - EINVAL ;
if ( wol - > wolopts & ~ WAKE_MAGIC )
return - EINVAL ;
2010-11-09 11:54:19 +00:00
device_set_wakeup_enable ( & dev - > dev , wol - > wolopts & WAKE_MAGIC ) ;
2008-07-11 18:04:45 -05:00
spin_lock_irqsave ( & priv - > bflock , flags ) ;
2010-11-09 11:54:19 +00:00
priv - > wol_en = ! ! device_may_wakeup ( & dev - > dev ) ;
2008-07-11 18:04:45 -05:00
spin_unlock_irqrestore ( & priv - > bflock , flags ) ;
return 0 ;
}
# endif
2005-06-20 10:54:21 -05:00
2009-11-02 07:03:40 +00:00
static void ethflow_to_filer_rules ( struct gfar_private * priv , u64 ethflow )
{
u32 fcr = 0x0 , fpr = FPR_FILER_MASK ;
if ( ethflow & RXH_L2DA ) {
fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0 ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0 ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
}
if ( ethflow & RXH_VLAN ) {
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0 ;
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
}
if ( ethflow & RXH_IP_SRC ) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0 ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
}
if ( ethflow & ( RXH_IP_DST ) ) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0 ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
}
if ( ethflow & RXH_L3_PROTO ) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0 ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
}
if ( ethflow & RXH_L4_B_0_1 ) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0 ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
}
if ( ethflow & RXH_L4_B_2_3 ) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0 ;
2011-06-07 21:46:51 +00:00
priv - > ftp_rqfpr [ priv - > cur_filer_idx ] = fpr ;
priv - > ftp_rqfcr [ priv - > cur_filer_idx ] = fcr ;
2009-11-02 07:03:40 +00:00
gfar_write_filer ( priv , priv - > cur_filer_idx , fcr , fpr ) ;
priv - > cur_filer_idx = priv - > cur_filer_idx - 1 ;
}
}
/* Insert hash rules for one flow class into the filer table.
 * Finds the class's parse/cluster rule, pops every rule below it into
 * local scratch arrays, emits the new hash rules there instead, and
 * then writes the popped rules back after them.
 * Returns 1 on success, 0 on failure (boolean-style, NOT an errno). */
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx;
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;	/* scratch copy of displaced fpr values */
	unsigned int *local_rqfcr;	/* scratch copy of displaced fcr values */
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
		GFP_KERNEL);
	local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
		GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		pr_err("Out of memory\n");
		ret = 0;
		goto err;
	}

	/* Property bits identifying the parse rule for this flow class */
	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		pr_err("Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	/* Scan the shadow table (top-down via j) for this class's parse
	 * rule, saving every rule visited into the scratch arrays */
	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
			RQFCR_CLE | RQFCR_AND)) &&
			(priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		pr_err("No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, then it begins the starting of a cluster rule
	 * if it was already programmed, we need to overwrite these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		/* CLE without AND terminates the cluster: overwrite it
		 * with a hash-table exact-match rule and stop */
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
			!(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
				RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
				priv->ftp_rqfpr[l]);
			break;
		}

		/* Skip continuation (AND) rules; pop everything else */
		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
			(priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;
	last_rule_idx = l;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				local_rqfcr[k], local_rqfpr[k]);
		/* NOTE(review): stops when the table is exhausted; any
		 * remaining popped rules are silently dropped — confirm
		 * this overflow case is acceptable */
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	/* kfree(NULL) is a no-op, so partial allocation is safe here */
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}
static int gfar_set_hash_opts ( struct gfar_private * priv , struct ethtool_rxnfc * cmd )
{
/* write the filer rules here */
if ( ! gfar_ethflow_to_filer_table ( priv , cmd - > data , cmd - > flow_type ) )
2011-04-08 13:45:11 +00:00
return - EINVAL ;
2009-11-02 07:03:40 +00:00
return 0 ;
}
2011-06-20 13:57:59 -07:00
static int gfar_check_filer_hardware ( struct gfar_private * priv )
{
struct gfar __iomem * regs = NULL ;
u32 i ;
regs = priv - > gfargrp [ 0 ] . regs ;
/* Check if we are in FIFO mode */
i = gfar_read ( & regs - > ecntrl ) ;
i & = ECNTRL_FIFM ;
if ( i = = ECNTRL_FIFM ) {
netdev_notice ( priv - > ndev , " Interface in FIFO mode \n " ) ;
i = gfar_read ( & regs - > rctrl ) ;
i & = RCTRL_PRSDEP_MASK | RCTRL_PRSFM ;
if ( i = = ( RCTRL_PRSDEP_MASK | RCTRL_PRSFM ) ) {
netdev_info ( priv - > ndev ,
" Receive Queue Filtering enabled \n " ) ;
} else {
netdev_warn ( priv - > ndev ,
" Receive Queue Filtering disabled \n " ) ;
return - EOPNOTSUPP ;
}
}
/* Or in standard mode */
else {
i = gfar_read ( & regs - > rctrl ) ;
i & = RCTRL_PRSDEP_MASK ;
if ( i = = RCTRL_PRSDEP_MASK ) {
netdev_info ( priv - > ndev ,
" Receive Queue Filtering enabled \n " ) ;
} else {
netdev_warn ( priv - > ndev ,
" Receive Queue Filtering disabled \n " ) ;
return - EOPNOTSUPP ;
}
}
/* Sets the properties for arbitrary filer rule
* to the first 4 Layer 4 Bytes */
regs - > rbifx = 0xC0C1C2C3 ;
return 0 ;
}
/* sort() comparator: ascending lexicographic order over the first four
 * bytes of each element (as memcmp(a, b, 4)). */
static int gfar_comp_asc(const void *a, const void *b)
{
	const unsigned char *pa = a;
	const unsigned char *pb = b;
	int i;

	for (i = 0; i < 4; i++) {
		if (pa[i] != pb[i])
			return (int)pa[i] - (int)pb[i];
	}
	return 0;
}
/* sort() comparator: descending lexicographic order over the first four
 * bytes of each element (as -memcmp(a, b, 4)). */
static int gfar_comp_desc(const void *a, const void *b)
{
	const unsigned char *pa = a;
	const unsigned char *pb = b;
	int i;

	for (i = 0; i < 4; i++) {
		if (pa[i] != pb[i])
			return (int)pb[i] - (int)pa[i];
	}
	return 0;
}
/* sort() swap callback: exchange two 16-byte (4 x 32-bit) filer entries.
 * The @size argument is ignored; entries are always four words. */
static void gfar_swap(void *a, void *b, int size)
{
	u32 *x = a;
	u32 *y = b;
	u32 tmp;
	int i;

	for (i = 0; i < 4; i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}
/* Append an AND-mask entry to the filer rule cache. */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	u32 idx = tab->index;

	tab->fe[idx].prop = mask;
	tab->fe[idx].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->index = idx + 1;
}
/* Append a parse-bit compare (e.g. IP or TCP): first the mask entry,
 * then the AND-linked value entry matching the parser result */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	struct gfar_filer_entry *fe;

	gfar_set_mask(mask, tab);
	fe = &tab->fe[tab->index];
	fe->prop = value;
	fe->ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_AND;
	tab->index++;
}
/* Append a generic compare of property id @flag: mask entry first,
 * then the AND-linked value entry */
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
		struct filer_table *tab)
{
	struct gfar_filer_entry *fe;

	gfar_set_mask(mask, tab);
	fe = &tab->fe[tab->index];
	fe->prop = value;
	fe->ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->index++;
}
/*
 * For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for don't care a tuple
 * For a don't care mask it gives us a 0
 *
 * The check if don't care and the mask adjustment if mask=0 is done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * For these guys we can discard them if they are value=0 and mask=0.
 *
 * Further the all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
		struct filer_table *tab)
{
	switch (flag) {
	/* 3bit */
	case RQFCR_PID_PRI:
		/* value == 0 && mask == 0 means "don't care": skip */
		if (!(value | mask))
			return;
		/* one-pad the bits above the 3-bit priority field */
		mask |= RQFCR_PID_PRI_MASK;
		break;
	/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		/* if every bit outside the 8-bit field is already set and
		 * the payload bits are all ones too, the whole field is
		 * "don't care": skip */
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
	/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
	/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
	/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
	/* for all real 32bit masks */
	default:
		/* mask == ~0 means "don't care": skip */
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip ( struct ethtool_tcpip4_spec * value ,
struct ethtool_tcpip4_spec * mask , struct filer_table * tab )
{
gfar_set_attribute ( value - > ip4src , mask - > ip4src , RQFCR_PID_SIA , tab ) ;
gfar_set_attribute ( value - > ip4dst , mask - > ip4dst , RQFCR_PID_DIA , tab ) ;
gfar_set_attribute ( value - > pdst , mask - > pdst , RQFCR_PID_DPT , tab ) ;
gfar_set_attribute ( value - > psrc , mask - > psrc , RQFCR_PID_SPT , tab ) ;
gfar_set_attribute ( value - > tos , mask - > tos , RQFCR_PID_TOS , tab ) ;
}
/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip ( struct ethtool_usrip4_spec * value ,
struct ethtool_usrip4_spec * mask , struct filer_table * tab )
{
gfar_set_attribute ( value - > ip4src , mask - > ip4src , RQFCR_PID_SIA , tab ) ;
gfar_set_attribute ( value - > ip4dst , mask - > ip4dst , RQFCR_PID_DIA , tab ) ;
gfar_set_attribute ( value - > tos , mask - > tos , RQFCR_PID_TOS , tab ) ;
gfar_set_attribute ( value - > proto , mask - > proto , RQFCR_PID_L4P , tab ) ;
gfar_set_attribute ( value - > l4_4_bytes , mask - > l4_4_bytes , RQFCR_PID_ARB ,
tab ) ;
}
/* Translates value and mask for ETHER spec.
 * Each MAC address is split into two 24-bit halves (SAH/SAL resp.
 * DAH/DAL) because that is how the filer compares addresses.
 * NOTE(review): masks here have already been inverted by
 * gfar_invert_masks(), so an all-ones mask means "don't care" and an
 * all-zero mask means "match exactly" — confirm against caller. */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
		struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			/* exact match: use all-ones hardware masks */
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			/* pack bytes 0-2 and 3-5 into 24-bit values */
			upper_temp_mask = mask->h_source[0] << 16
					| mask->h_source[1] << 8
					| mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16
					| mask->h_source[4] << 8
					| mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(
				value->h_source[0] << 16 | value->h_source[1]
				<< 8 | value->h_source[2],
				upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(
				value->h_source[3] << 16 | value->h_source[4]
				<< 8 | value->h_source[5],
				lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest)
				&& is_zero_ether_addr(mask->h_dest))) {
			/* exact match on ff:ff:ff:ff:ff:ff — use the
			 * parser's broadcast flag instead of two compares */
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16
						| mask->h_dest[1] << 8
						| mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16
						| mask->h_dest[4] << 8
						| mask->h_dest[5];
			}
			/* Upper 24bit */
			gfar_set_attribute(
					value->h_dest[0] << 16
					| value->h_dest[1] << 8
					| value->h_dest[2],
					upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(
					value->h_dest[3] << 16
					| value->h_dest[4] << 8
					| value->h_dest[5],
					lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
}
/* Convert a rule to binary filter format of gianfar.
 * Appends the rule's entries to @tab, fixes up the chain terminator
 * (queue/reject bits), and marks clusterable spans with RQFCR_CLE.
 *
 * Returns 0 on success, -1 for an unsupported flow type, -EBUSY when
 * the software cache overflows. */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
		struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
		/* an all-zero (inverted) mask means "any VLAN": widen it */
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = 0xFFFF;
		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;
		/* Separate the fields */
		id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
		id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
		cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
		cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
		prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				&rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				&rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				tab);
		/* 132 is the IP protocol number assigned to SCTP */
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
				(struct ethtool_tcpip4_spec *) &rule->m_u, tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				(struct ethtool_usrip4_spec *) &rule->m_u, tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
				(struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		/* NOTE(review): raw ctrl value 0x20 — presumably the
		 * match-all compare mode; confirm against the RQFCR_*
		 * defines in gianfar.h */
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop.
	 * The queue number lives in bits 10+ of the control word. */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is free space in hw */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}
/* Copy @size filer entries from @src to @dst.
 * Copies from the highest index downwards, so it is safe for
 * overlapping regions with dst > src (as used when expanding the
 * table in gfar_expand_filer_entries()).
 * Fix: use standard `[]` array parameters instead of the nonstandard
 * zero-sized `[0]` form (both decay to pointers, so callers are
 * unaffected). */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
		struct gfar_filer_entry src[], s32 size)
{
	while (size > 0) {
		size--;
		dst[size].ctrl = src[size].ctrl;
		dst[size].prop = src[size].prop;
	}
}
/* Delete the contents of the filer-table between start and end
 * and collapse them.
 * Both bounds are inclusive. Returns -EINVAL for an invalid range.
 * NOTE(review): end == MAX_FILER_CACHE_IDX passes this check and is
 * incremented below — confirm fe[] has one slot of headroom beyond
 * MAX_FILER_CACHE_IDX, otherwise the copy loop can read past it. */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
	int length;

	if (end > MAX_FILER_CACHE_IDX || end < begin)
		return -EINVAL;

	end++;
	length = end - begin;	/* number of entries being removed */

	/* Copy: slide everything above the deleted span down */
	while (end < tab->index) {
		tab->fe[begin].ctrl = tab->fe[end].ctrl;
		tab->fe[begin++].prop = tab->fe[end++].prop;
	}

	/* Fill up with don't cares (0x60 is the fall-through pattern also
	 * used by gfar_write_filer_table) */
	while (begin < tab->index) {
		tab->fe[begin].ctrl = 0x60;
		tab->fe[begin].prop = 0xFFFFFFFF;
		begin++;
	}

	tab->index -= length;
	return 0;
}
/* Make space on the wanted location: opens @length free slots at index
 * @begin by shifting the following entries upwards.
 * NOTE(review): the copy size `tab->index - length + 1` looks
 * suspicious — moving everything from @begin to the end would need
 * `tab->index - begin` entries. TODO confirm against the only caller
 * (gfar_cluster_filer) before changing. */
static int gfar_expand_filer_entries(u32 begin, u32 length,
		struct filer_table *tab)
{
	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
			> MAX_FILER_CACHE_IDX)
		return -EINVAL;

	/* backward copy, safe for the overlapping regions */
	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
			tab->index - length + 1);

	tab->index += length;
	return 0;
}
static int gfar_get_next_cluster_start ( int start , struct filer_table * tab )
{
for ( ; ( start < tab - > index ) & & ( start < MAX_FILER_CACHE_IDX - 1 ) ; start + + ) {
if ( ( tab - > fe [ start ] . ctrl & ( RQFCR_AND | RQFCR_CLE ) )
= = ( RQFCR_AND | RQFCR_CLE ) )
return start ;
}
return - 1 ;
}
static int gfar_get_next_cluster_end ( int start , struct filer_table * tab )
{
for ( ; ( start < tab - > index ) & & ( start < MAX_FILER_CACHE_IDX - 1 ) ; start + + ) {
if ( ( tab - > fe [ start ] . ctrl & ( RQFCR_AND | RQFCR_CLE ) )
= = ( RQFCR_CLE ) )
return start ;
}
return - 1 ;
}
/*
 * Uses hardwares clustering option to reduce
 * the number of filer table entries.
 * Two clusters whose start entry (and the mask entry in front of it)
 * are identical are merged: the second cluster's body is moved behind
 * the first one and its duplicated start/mask entries are dropped.
 */
static void gfar_cluster_filer(struct filer_table *tab)
{
	s32 i = -1, j, iend, jend;

	while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
		j = i;
		while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
			/*
			 * The cluster entries self and the previous one
			 * (a mask) must be identical!
			 */
			if (tab->fe[i].ctrl != tab->fe[j].ctrl)
				break;
			if (tab->fe[i].prop != tab->fe[j].prop)
				break;
			if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
				break;
			if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
				break;
			iend = gfar_get_next_cluster_end(i, tab);
			jend = gfar_get_next_cluster_end(j, tab);
			if (jend == -1 || iend == -1)
				break;
			/*
			 * First we make some free space, where our cluster
			 * element should be. Then we copy it there and finally
			 * delete in from its old location.
			 */
			if (gfar_expand_filer_entries(iend, (jend - j), tab)
					== -EINVAL)
				break;
			/* note: jend shifted by (jend - j) due to the
			 * expansion above, so fe[jend + 1] is the moved
			 * body of the second cluster */
			gfar_copy_filer_entries(&(tab->fe[iend + 1]),
					&(tab->fe[jend + 1]), jend - j);
			if (gfar_trim_filer_entries(jend - 1,
					jend + (jend - j), tab) == -EINVAL)
				return;
			/* Mask out cluster bit */
			tab->fe[iend].ctrl &= ~(RQFCR_CLE);
		}
	}
}
2011-07-07 04:30:29 -07:00
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits ( struct gfar_filer_entry * a1 ,
2011-06-20 13:57:59 -07:00
struct gfar_filer_entry * a2 , struct gfar_filer_entry * b1 ,
2011-07-07 04:30:29 -07:00
struct gfar_filer_entry * b2 , u32 mask )
2011-06-20 13:57:59 -07:00
{
u32 temp [ 4 ] ;
2011-07-07 04:30:29 -07:00
temp [ 0 ] = a1 - > ctrl & mask ;
temp [ 1 ] = a2 - > ctrl & mask ;
temp [ 2 ] = b1 - > ctrl & mask ;
temp [ 3 ] = b2 - > ctrl & mask ;
2011-06-20 13:57:59 -07:00
2011-07-07 04:30:29 -07:00
a1 - > ctrl & = ~ mask ;
a2 - > ctrl & = ~ mask ;
b1 - > ctrl & = ~ mask ;
b2 - > ctrl & = ~ mask ;
2011-06-20 13:57:59 -07:00
a1 - > ctrl | = temp [ 1 ] ;
a2 - > ctrl | = temp [ 0 ] ;
b1 - > ctrl | = temp [ 3 ] ;
b2 - > ctrl | = temp [ 2 ] ;
}
/*
 * Generate a list consisting of masks values with their start and
 * end of validity and block as indicator for parts belonging
 * together (glued by ANDs) in mask_table.
 * Returns the number of mask entries found.
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
		struct filer_table *tab)
{
	u32 i, and_index = 0, block_index = 1;

	for (i = 0; i < tab->index; i++) {
		/* LSByte of control = 0 sets a mask */
		if (!(tab->fe[i].ctrl & 0xF)) {
			mask_table[and_index].mask = tab->fe[i].prop;
			mask_table[and_index].start = i;
			mask_table[and_index].block = block_index;
			/* the previous mask's scope ends where this one
			 * starts */
			if (and_index >= 1)
				mask_table[and_index - 1].end = i - 1;
			and_index++;
		}
		/* cluster starts and ends will be separated because they should
		 * hold their position */
		if (tab->fe[i].ctrl & RQFCR_CLE)
			block_index++;
		/* A not set AND indicates the end of a depended block */
		if (!(tab->fe[i].ctrl & RQFCR_AND))
			block_index++;
	}

	/* NOTE(review): assumes and_index >= 1 here; a table with no mask
	 * entry would write mask_table[-1] — confirm callers never pass
	 * such a table */
	mask_table[and_index - 1].end = i - 1;

	return and_index;
}
/*
 * Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
		struct filer_table *temp_table, u32 and_index)
{
	/* Pointer to compare function (_asc or _desc) */
	int (*gfar_comp)(const void *, const void *);

	u32 i, size = 0, start = 0, prev = 1;
	u32 old_first, old_last, new_first, new_last;

	gfar_comp = &gfar_comp_desc;

	for (i = 0; i < and_index; i++) {
		/* block boundary reached: sort the finished block */
		if (prev != mask_table[i].block) {
			old_first = mask_table[start].start + 1;
			old_last = mask_table[i - 1].end;
			sort(mask_table + start, size,
					sizeof(struct gfar_mask_entry),
					gfar_comp, &gfar_swap);

			/* Toggle order for every block. This makes the
			 * thing more efficient! */
			if (gfar_comp == gfar_comp_desc)
				gfar_comp = &gfar_comp_asc;
			else
				gfar_comp = &gfar_comp_desc;

			new_first = mask_table[start].start + 1;
			new_last = mask_table[i - 1].end;

			/* restore the position-critical control flags of
			 * the block's first and last entry */
			gfar_swap_bits(&temp_table->fe[new_first],
					&temp_table->fe[old_first],
					&temp_table->fe[new_last],
					&temp_table->fe[old_last],
					RQFCR_QUEUE | RQFCR_CLE |
					RQFCR_RJE | RQFCR_AND);

			start = i;
			size = 0;
		}
		size++;
		prev = mask_table[i].block;
	}
}
/*
 * Reduces the number of masks needed in the filer table to save entries
 * This is done by sorting the masks of a depended block. A depended block is
 * identified by gluing ANDs or CLE. The sorting order toggles after every
 * block. Of course entries in scope of a mask must change their location with
 * it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;
	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order */
	temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;
	memcpy(temp_table, tab, sizeof(*temp_table));

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			sizeof(struct gfar_mask_entry), GFP_KERNEL);
	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
				&(temp_table->fe[mask_table[i].start]), size);
		j += size;
	}

	/* And finally we just have to check for duplicated masks and drop the
	 * second ones.
	 * NOTE(review): 0x80 is presumably the ctrl pattern produced by
	 * gfar_set_mask() (RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT)
	 * — confirm the numeric value in gianfar.h */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one! */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:	kfree(temp_table);
	return ret;
}
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table ( struct gfar_private * priv ,
struct filer_table * tab )
{
u32 i = 0 ;
if ( tab - > index > MAX_FILER_IDX - 1 )
return - EBUSY ;
/* Avoid inconsistent filer table to be processed */
lock_rx_qs ( priv ) ;
/* Fill regular entries */
for ( ; i < MAX_FILER_IDX - 1 & & ( tab - > fe [ i ] . ctrl | tab - > fe [ i ] . ctrl ) ; i + + )
gfar_write_filer ( priv , i , tab - > fe [ i ] . ctrl , tab - > fe [ i ] . prop ) ;
/* Fill the rest with fall-troughs */
for ( ; i < MAX_FILER_IDX - 1 ; i + + )
gfar_write_filer ( priv , i , 0x60 , 0xFFFFFFFF ) ;
/* Last entry must be default accept
* because that ' s what people expect */
gfar_write_filer ( priv , i , 0x20 , 0x0 ) ;
unlock_rx_qs ( priv ) ;
return 0 ;
}
/* Warn about parts of a flow spec the filer cannot match on.
 * Only warnings are issued — the rule is still accepted (always
 * returns 0). Called after gfar_invert_masks() (see gfar_add_cls),
 * so the m_* fields carry the driver's inverted mask convention;
 * ~mask != 0 presumably means the user constrained that field —
 * confirm against ethtool mask semantics. */
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
		struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
					"User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
					"VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
					"IP-Version differing from IPv4 not supported!\n");
	return 0;
}
static int gfar_process_filer_changes ( struct gfar_private * priv )
{
struct ethtool_flow_spec_container * j ;
struct filer_table * tab ;
s32 i = 0 ;
s32 ret = 0 ;
/* So index is set to zero, too! */
tab = kzalloc ( sizeof ( * tab ) , GFP_KERNEL ) ;
if ( tab = = NULL )
return - ENOMEM ;
/* Now convert the existing filer data from flow_spec into
* filer tables binary format */
list_for_each_entry ( j , & priv - > rx_list . list , list ) {
ret = gfar_convert_to_filer ( & j - > fs , tab ) ;
if ( ret = = - EBUSY ) {
netdev_err ( priv - > ndev , " Rule not added: No free space! \n " ) ;
goto end ;
}
if ( ret = = - 1 ) {
netdev_err ( priv - > ndev , " Rule not added: Unsupported Flow-type! \n " ) ;
goto end ;
}
}
i = tab - > index ;
/* Optimizations to save entries */
gfar_cluster_filer ( tab ) ;
gfar_optimize_filer_masks ( tab ) ;
pr_debug ( " \n \t Summary: \n "
" \t Data on hardware: %d \n "
" \t Compression rate: %d%% \n " ,
tab - > index , 100 - ( 100 * tab - > index ) / i ) ;
/* Write everything to hardware */
ret = gfar_write_filer_table ( priv , tab ) ;
if ( ret = = - EBUSY ) {
netdev_err ( priv - > ndev , " Rule not added: No free space! \n " ) ;
goto end ;
}
end : kfree ( tab ) ;
return ret ;
}
static void gfar_invert_masks ( struct ethtool_rx_flow_spec * flow )
{
u32 i = 0 ;
for ( i = 0 ; i < sizeof ( flow - > m_u ) ; i + + )
flow - > m_u . hdata [ i ] ^ = 0xFF ;
flow - > m_ext . vlan_etype ^ = 0xFFFF ;
flow - > m_ext . vlan_tci ^ = 0xFFFF ;
flow - > m_ext . data [ 0 ] ^ = ~ 0 ;
flow - > m_ext . data [ 1 ] ^ = ~ 0 ;
}
/* Insert a classification rule into the (location-sorted) software
 * rule list and reprogram the hardware.
 * Returns 0 on success, -EBUSY for a duplicate location, -ENOMEM, or
 * whatever the hardware check / table rebuild reports. */
static int gfar_add_cls(struct gfar_private *priv,
		struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	/* convert the ethtool mask convention to the driver's internal one */
	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		/* first rule: verify the hardware filer is usable at all */
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		/* keep the list sorted by location; reject duplicates */
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
						"Rule not added: ID %d not free!\n",
						flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	priv->rx_list.count++;
	return ret;

clean_list:
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}
/* Remove the classification rule at @loc and reprogram the hardware.
 * Returns 0 on success, -EINVAL when no rule with that location
 * exists.
 * Fix: ret holds negative error codes, so it must be a signed int,
 * not u32. */
static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	int ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}
static int gfar_get_cls ( struct gfar_private * priv , struct ethtool_rxnfc * cmd )
{
struct ethtool_flow_spec_container * comp ;
u32 ret = - EINVAL ;
list_for_each_entry ( comp , & priv - > rx_list . list , list ) {
if ( comp - > fs . location = = cmd - > fs . location ) {
memcpy ( & cmd - > fs , & comp - > fs , sizeof ( cmd - > fs ) ) ;
gfar_invert_masks ( & cmd - > fs ) ;
ret = 0 ;
break ;
}
}
return ret ;
}
/* Report the locations of all installed rules to ethtool.
 * Returns -EMSGSIZE when the caller's rule_locs buffer (sized by
 * cmd->rule_cnt) is too small. */
static int gfar_get_cls_all(struct gfar_private *priv,
		struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 cnt = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = comp->fs.location;
		cnt++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = cnt;

	return 0;
}
2009-11-02 07:03:40 +00:00
static int gfar_set_nfc ( struct net_device * dev , struct ethtool_rxnfc * cmd )
{
struct gfar_private * priv = netdev_priv ( dev ) ;
int ret = 0 ;
2011-06-20 13:57:59 -07:00
mutex_lock ( & priv - > rx_queue_access ) ;
switch ( cmd - > cmd ) {
2009-11-02 07:03:40 +00:00
case ETHTOOL_SRXFH :
ret = gfar_set_hash_opts ( priv , cmd ) ;
break ;
2011-06-20 13:57:59 -07:00
case ETHTOOL_SRXCLSRLINS :
if ( cmd - > fs . ring_cookie ! = RX_CLS_FLOW_DISC & &
cmd - > fs . ring_cookie > = priv - > num_rx_queues ) {
ret = - EINVAL ;
break ;
}
ret = gfar_add_cls ( priv , & cmd - > fs ) ;
break ;
case ETHTOOL_SRXCLSRLDEL :
ret = gfar_del_cls ( priv , cmd - > fs . location ) ;
break ;
2009-11-02 07:03:40 +00:00
default :
ret = - EINVAL ;
}
2011-06-20 13:57:59 -07:00
mutex_unlock ( & priv - > rx_queue_access ) ;
return ret ;
}
static int gfar_get_nfc ( struct net_device * dev , struct ethtool_rxnfc * cmd ,
2011-09-06 13:49:12 +00:00
u32 * rule_locs )
2011-06-20 13:57:59 -07:00
{
struct gfar_private * priv = netdev_priv ( dev ) ;
int ret = 0 ;
switch ( cmd - > cmd ) {
case ETHTOOL_GRXRINGS :
cmd - > data = priv - > num_rx_queues ;
break ;
case ETHTOOL_GRXCLSRLCNT :
cmd - > rule_cnt = priv - > rx_list . count ;
break ;
case ETHTOOL_GRXCLSRULE :
ret = gfar_get_cls ( priv , cmd ) ;
break ;
case ETHTOOL_GRXCLSRLALL :
2011-09-06 13:49:12 +00:00
ret = gfar_get_cls_all ( priv , cmd , rule_locs ) ;
2011-06-20 13:57:59 -07:00
break ;
default :
ret = - EINVAL ;
break ;
}
2009-11-02 07:03:40 +00:00
return ret ;
}
2006-09-13 14:30:00 -04:00
/* gianfar's ethtool operations table; registered by the main driver
 * so userspace ethtool can query and configure the device */
const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	/* Wake-on-LAN is only meaningful with power management support */
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
};