/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>

#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
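/* INDIRECT_CALL_2(f, f2, f1, ...) from <linux/indirect_call_wrapper.h>
 * compares the function pointer against the two likely targets and makes
 * a direct call on a match, only falling back to a (retpoline-penalised)
 * indirect call when neither matches.  Here that keeps the common EF100
 * and legacy TX enqueue paths free of indirect calls.
 */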
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
			       ef100_enqueue_skb, __efx_enqueue_skb,
			       tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data);
extern unsigned int efx_piobuf_size;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		INDIRECT_CALL_2(channel->efx->type->rx_packet,
				__ef100_rx_packet, __efx_rx_packet,
				channel);
}

static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
	if (efx->type->rx_buf_hash_valid)
		return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
				       ef100_rx_buf_hash_valid,
				       prefix);
	return true;
}

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT		128U
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))

/* All EF10 architecture NICs steal one bit of the DMAQ size for various
 * other purposes when counting TxQ entries, so we halve the queue size.
 */
#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)

static inline bool efx_rss_enabled(struct efx_nic *efx)
{
	return efx->rss_spread > 1;
}

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
					   struct efx_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
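
/* A minimal usage sketch (the helper below is hypothetical, not part of
 * the driver API): build a spec with the constructors from "filter.h"
 * and insert it, steering a unicast MAC address to RX queue 0 at manual
 * priority.
 */
static inline s32 efx_filter_example_insert_mac(struct efx_nic *efx,
						const u8 *mac_addr)
{
	struct efx_filter_spec spec;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, mac_addr);
	/* Returns the new filter ID, or a negative error such as -EPERM
	 * when an equal-match filter of higher priority already exists.
	 */
	return efx_filter_insert_filter(efx, &spec, false);
}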

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
					    enum efx_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}
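
/* A minimal sketch (hypothetical helper) of removing a filter by the ID
 * that efx_filter_insert_filter() returned.  The priority must match the
 * one used at insertion time; out-of-range IDs are rejected rather than
 * dereferenced, so IDs supplied by userland are handled safely.
 */
static inline int efx_filter_example_remove(struct efx_nic *efx, u32 filter_id)
{
	return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, filter_id);
}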

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
			   enum efx_filter_priority priority,
			   u32 filter_id, struct efx_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}

static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}

static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
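
/* A minimal sketch (hypothetical helper; assumes <linux/slab.h> for
 * kcalloc/kfree) of the enumeration pattern used when listing RX filter
 * rules: size a buffer with efx_filter_get_rx_id_limit(), then let
 * efx_filter_get_rx_ids() fill it, returning the number of IDs found or
 * a negative error.
 */
static inline s32 efx_filter_example_list_manual(struct efx_nic *efx)
{
	u32 size = efx_filter_get_rx_id_limit(efx);
	u32 *buf;
	s32 rc;

	buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, buf, size);
	kfree(buf);
	return rc;
}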

/* RSS contexts */
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* Lock/freeze all TX queues so that we can be sure the
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}

static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
	if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
		netif_device_attach(efx->net_dev);
}

/* Check that a rw_semaphore is held for writing: if the read lock can be
 * taken, no writer holds the semaphore, so warn and report failure;
 * otherwise assume the caller holds it for writing.  A successful trylock
 * is released immediately, so the check has no lasting effect.
 */
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem))) {
		up_read(sem);
		return false;
	}
	return true;
}

int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush);

#endif /* EFX_EFX_H */