While it was convenient to have a generic ring structure that served both Tx and Rx sides, next commits are going to introduce several Tx-specific fields, so in order to avoid hurting the Rx side, let's pull the Tx and Rx rings out into new ice_tx_ring and ice_rx_ring structs.

The Rx ring could have kept the old ice_ring, which would reduce the code churn within this patch, but that would make things asymmetric.

Make a union out of the ring container within ice_q_vector so that it is possible to iterate over the newly introduced ice_tx_ring.

Remove @size, as it is only accessed from the control path and can be calculated easily there.

Change the definitions of ice_update_ring_stats() and ice_fetch_u64_stats_per_ring() so that they are ring-agnostic and can be used for both Rx and Tx rings.

The sizes of the Rx and Tx ring structs are 256 and 192 bytes, respectively. In the Rx ring, xdp_rxq_info occupies its own cacheline, so that is the major difference now.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
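For context, a minimal, self-contained C sketch of the ring-container union described above. This is not the driver's actual layout; the sketch_* names are hypothetical, and the real ice_tx_ring/ice_rx_ring/ice_ring_container structs carry many more fields. It only shows how an anonymous union lets one container head either a Tx or an Rx list, and how iteration can then pick the correctly typed head:

struct sketch_tx_ring {
	struct sketch_tx_ring *next;	/* next Tx ring on this vector */
	unsigned int count;		/* number of descriptors */
};

struct sketch_rx_ring {
	struct sketch_rx_ring *next;	/* next Rx ring on this vector */
	unsigned int count;		/* number of descriptors */
};

struct sketch_ring_container {
	/* one head pointer, two typed views (anonymous union, C11) */
	union {
		struct sketch_tx_ring *tx_ring;
		struct sketch_rx_ring *rx_ring;
	};
};

/* Walk whichever list the caller asks for; same storage either way. */
#define sketch_for_each_tx_ring(pos, head) \
	for ((pos) = (head).tx_ring; (pos); (pos) = (pos)->next)
#define sketch_for_each_rx_ring(pos, head) \
	for ((pos) = (head).rx_ring; (pos); (pos) = (pos)->next)

Dropping @size from the hot struct then costs nothing on the fast path: a control-path helper can recompute it from the descriptor count and descriptor size whenever it is needed.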
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019-2021, Intel Corporation. */

#ifndef _ICE_ESWITCH_H_
#define _ICE_ESWITCH_H_

#include <net/devlink.h>

#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_release(struct ice_pf *pf);
int ice_eswitch_configure(struct ice_pf *pf);
int ice_eswitch_rebuild(struct ice_pf *pf);

int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);

void ice_eswitch_update_repr(struct ice_vsi *vsi);

void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);

struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc);

void ice_eswitch_set_target_vsi(struct sk_buff *skb,
				struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
#else /* CONFIG_ICE_SWITCHDEV */
static inline void ice_eswitch_release(struct ice_pf *pf) { }

static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }

static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off) { }

static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }

static inline int ice_eswitch_configure(struct ice_pf *pf)
{
	return -EOPNOTSUPP;
}

static inline int ice_eswitch_rebuild(struct ice_pf *pf)
{
	return -EOPNOTSUPP;
}

static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	return DEVLINK_ESWITCH_MODE_LEGACY;
}

static inline int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return false;
}

static inline struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc)
{
	return rx_ring->netdev;
}

static inline netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	return NETDEV_TX_BUSY;
}
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
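One note on the #else branch above: because every function has a static inline stub returning a safe default (-EOPNOTSUPP, false, NETDEV_TX_BUSY, or the ring's netdev), call sites can stay free of #ifdef CONFIG_ICE_SWITCHDEV guards. A hypothetical caller sketch (sketch_setup() is not a driver function; it assumes this header and <linux/errno.h> are included):

static int sketch_setup(struct ice_pf *pf)
{
	int err = ice_eswitch_configure(pf);

	/* Switchdev compiled out: the stub returned -EOPNOTSUPP,
	 * which this caller treats as "nothing to do" rather than
	 * as a hard failure.
	 */
	if (err == -EOPNOTSUPP)
		return 0;
	return err;
}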