Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-08-18 (ice)

This series contains updates to ice driver only.

Jesse and Anatolii add support for controlling FCS/CRC stripping via
ethtool.

Anirudh allows for 100M speeds on devices which support it.

Sylwester removes ucast_shared field and the associated dead code
related to it.

Mikael removes non-inclusive language from the driver.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: remove non-inclusive language
  ice: Remove ucast_shared
  ice: Allow 100M speeds for some devices
  ice: Implement FCS/CRC and VLAN stripping co-existence policy
  ice: Implement control of FCS/CRC stripping
====================

Link: https://lore.kernel.org/r/20220818155207.996297-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0134fe8512
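Usage note for the FCS/CRC stripping control added below: the driver wires it to the generic rx-fcs netdev feature, so it is expected to be exercised from user space with ethtool. A minimal sketch, assuming the standard netdev feature-flag names and a placeholder interface name eth0:

    # keep the 4-byte FCS/CRC in received frames (stop stripping it);
    # per the co-existence policy below this is rejected with -EIO while
    # VLAN stripping is still enabled, so disable VLAN stripping first
    ethtool -K eth0 rx-vlan-offload off rx-vlan-stag-hw-parse off
    ethtool -K eth0 rx-fcs on

    # return to the default behaviour (CRC stripped before frames are posted)
    ethtool -K eth0 rx-fcs off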
@@ -854,6 +854,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
 int ice_up(struct ice_vsi *vsi);
 int ice_down(struct ice_vsi *vsi);
+int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
@@ -417,7 +417,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	/* Strip the Ethernet CRC bytes before the packet is posted to host
 	 * memory.
 	 */
-	rlan_ctx.crcstrip = 1;
+	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
 
 	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
 	 * and it needs to remain 1 for non-DVM capable configurations to not
@@ -2775,6 +2775,26 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
 
+/**
+ * ice_is_100m_speed_supported
+ * @hw: pointer to the HW struct
+ *
+ * returns true if 100M speeds are supported by the device,
+ * false otherwise.
+ */
+bool ice_is_100m_speed_supported(struct ice_hw *hw)
+{
+	switch (hw->device_id) {
+	case ICE_DEV_ID_E822C_SGMII:
+	case ICE_DEV_ID_E822L_SGMII:
+	case ICE_DEV_ID_E823L_1GBE:
+	case ICE_DEV_ID_E823C_SGMII:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /**
  * ice_get_link_speed_based_on_phy_type - returns link speed
  * @phy_type_low: lower part of phy_type
@@ -204,6 +204,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
 int
 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
 		bool *value, struct ice_sq_cd *cd);
+bool ice_is_100m_speed_supported(struct ice_hw *hw);
 int
 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
 		    struct ice_sq_cd *cd);
@@ -1289,10 +1289,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 	}
 	if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
 		/* down and up VSI so that changes of Rx cfg are reflected. */
-		if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
-			ice_down(vsi);
-			ice_up(vsi);
-		}
+		ice_down_up(vsi);
 	}
 	/* don't allow modification of this flag when a single VF is in
 	 * promiscuous mode because it's not supported
@@ -1473,20 +1470,22 @@ ice_get_ethtool_stats(struct net_device *netdev,
 
 /**
  * ice_mask_min_supported_speeds
+ * @hw: pointer to the HW structure
  * @phy_types_high: PHY type high
  * @phy_types_low: PHY type low to apply minimum supported speeds mask
  *
  * Apply minimum supported speeds mask to PHY type low. These are the speeds
 * for ethtool supported link mode.
 */
-static
-void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+static void
+ice_mask_min_supported_speeds(struct ice_hw *hw,
+			      u64 phy_types_high, u64 *phy_types_low)
 {
 	/* if QSFP connection with 100G speed, minimum supported speed is 25G */
 	if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
 	    phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
 		*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
-	else
+	else if (!ice_is_100m_speed_supported(hw))
 		*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
 }
 
@@ -1536,7 +1535,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
 	phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
 
-	ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+	ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
+				      &phy_types_low);
 	/* determine advertised modes based on link override only
 	 * if it's supported and if the FW doesn't abstract the
 	 * driver from having to account for link overrides
@@ -61,13 +61,13 @@ static void ice_lag_set_backup(struct ice_lag *lag)
  */
 static void ice_display_lag_info(struct ice_lag *lag)
 {
-	const char *name, *peer, *upper, *role, *bonded, *master;
+	const char *name, *peer, *upper, *role, *bonded, *primary;
 	struct device *dev = &lag->pf->pdev->dev;
 
 	name = lag->netdev ? netdev_name(lag->netdev) : "unset";
 	peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
 	upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
-	master = lag->master ? "TRUE" : "FALSE";
+	primary = lag->primary ? "TRUE" : "FALSE";
 	bonded = lag->bonded ? "BONDED" : "UNBONDED";
 
 	switch (lag->role) {
@@ -87,8 +87,8 @@ static void ice_display_lag_info(struct ice_lag *lag)
 		role = "ERROR";
 	}
 
-	dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
-		bonded, peer, upper, role, master);
+	dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
+		bonded, peer, upper, role, primary);
 }
 
 /**
@@ -119,7 +119,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
 	}
 
 	if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
-		netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+		netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
 		goto lag_out;
 	}
 
@@ -164,8 +164,8 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
 	lag->bonded = true;
 	lag->role = ICE_LAG_UNSET;
 
-	/* if this is the first element in an LAG mark as master */
-	lag->master = !!(peers == 1);
+	/* if this is the first element in an LAG mark as primary */
+	lag->primary = !!(peers == 1);
 }
 
 /**
@@ -264,7 +264,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
 	netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
 
 	if (!netif_is_lag_master(info->upper_dev)) {
-		netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+		netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
 		return;
 	}
 
@@ -24,7 +24,7 @@ struct ice_lag {
 	struct net_device *upper_netdev; /* upper bonding netdev */
 	struct notifier_block notif_block;
 	u8 bonded:1; /* currently bonded */
-	u8 master:1; /* this is a master */
+	u8 primary:1; /* this is primary */
 	u8 handler:1; /* did we register a rx_netdev_handler */
 	/* each thing blocking bonding will increment this value by one.
 	 * If this value is zero, then bonding is allowed.
@@ -1562,6 +1562,22 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
 	kfree(lut);
 }
 
+/**
+ * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
+ * @vsi: VSI to be configured
+ * @disable: set to true to have FCS / CRC in the frame data
+ */
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
+{
+	int i;
+
+	ice_for_each_rxq(vsi, i)
+		if (disable)
+			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+		else
+			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+}
+
 /**
  * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
  * @vsi: VSI to be configured
@@ -3277,6 +3293,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 		 */
 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 			ice_vsi_cfg_rss_lut_key(vsi);
+
+		/* disable or enable CRC stripping */
+		if (vsi->netdev)
+			ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
+					      NETIF_F_RXFCS));
+
 		break;
 	case ICE_VSI_VF:
 		ret = ice_vsi_alloc_q_vectors(vsi);
@@ -89,6 +89,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
 
 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
 
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable);
+
 void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
 
 void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
@@ -3376,6 +3376,11 @@ static void ice_set_netdev_features(struct net_device *netdev)
 	if (is_dvm_ena)
 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
 				       NETIF_F_HW_VLAN_STAG_TX;
+
+	/* Leave CRC / FCS stripping enabled by default, but allow the value to
+	 * be changed at runtime
+	 */
+	netdev->hw_features |= NETIF_F_RXFCS;
 }
 
 /**
@@ -4667,8 +4672,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 		ice_set_safe_mode_caps(hw);
 	}
 
-	hw->ucast_shared = true;
-
 	err = ice_init_pf(pf);
 	if (err) {
 		dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -5727,6 +5730,9 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
 					 NETIF_F_HW_VLAN_STAG_RX | \
 					 NETIF_F_HW_VLAN_STAG_TX)
 
+#define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
+					 NETIF_F_HW_VLAN_STAG_RX)
+
 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
 					 NETIF_F_HW_VLAN_STAG_FILTER)
 
@@ -5813,6 +5819,14 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
 					NETIF_F_HW_VLAN_STAG_TX);
 	}
 
+	if (!(netdev->features & NETIF_F_RXFCS) &&
+	    (features & NETIF_F_RXFCS) &&
+	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
+	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
+		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+	}
+
 	return features;
 }
 
@@ -5906,6 +5920,13 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
 	if (current_vlan_features ^ requested_vlan_features) {
+		if ((features & NETIF_F_RXFCS) &&
+		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+			dev_err(ice_pf_to_dev(vsi->back),
+				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
+			return -EIO;
+		}
+
 		err = ice_set_vlan_offload_features(vsi, features);
 		if (err)
 			return err;
@@ -5987,6 +6008,23 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
 	if (ret)
 		return ret;
 
+	/* Turn on receive of FCS aka CRC, and after setting this
+	 * flag the packet data will have the 4 byte CRC appended
+	 */
+	if (changed & NETIF_F_RXFCS) {
+		if ((features & NETIF_F_RXFCS) &&
+		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+			dev_err(ice_pf_to_dev(vsi->back),
+				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
+			return -EIO;
+		}
+
+		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
+		ret = ice_down_up(vsi);
+		if (ret)
+			return ret;
+	}
+
 	if (changed & NETIF_F_NTUPLE) {
 		bool ena = !!(features & NETIF_F_NTUPLE);
 
@@ -6690,6 +6728,31 @@ int ice_down(struct ice_vsi *vsi)
 	return 0;
 }
 
+/**
+ * ice_down_up - shutdown the VSI connection and bring it up
+ * @vsi: the VSI to be reconnected
+ */
+int ice_down_up(struct ice_vsi *vsi)
+{
+	int ret;
+
+	/* if DOWN already set, nothing to do */
+	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+		return 0;
+
+	ret = ice_down(vsi);
+	if (ret)
+		return ret;
+
+	ret = ice_up(vsi);
+	if (ret) {
+		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 /**
  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
  * @vsi: VSI having resources allocated
@@ -3449,31 +3449,15 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
  * ice_add_mac - Add a MAC address based filter rule
  * @hw: pointer to the hardware structure
  * @m_list: list of MAC addresses and forwarding information
- *
- * IMPORTANT: When the ucast_shared flag is set to false and m_list has
- * multiple unicast addresses, the function assumes that all the
- * addresses are unique in a given add_mac call. It doesn't
- * check for duplicates in this case, removing duplicates from a given
- * list should be taken care of in the caller of this function.
 */
 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
 {
-	struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
 	struct ice_fltr_list_entry *m_list_itr;
-	struct list_head *rule_head;
-	u16 total_elem_left, s_rule_size;
-	struct ice_switch_info *sw;
-	struct mutex *rule_lock; /* Lock to protect filter rule list */
-	u16 num_unicast = 0;
 	int status = 0;
-	u8 elem_sent;
 
 	if (!m_list || !hw)
 		return -EINVAL;
 
-	s_rule = NULL;
-	sw = hw->switch_info;
-	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
 	list_for_each_entry(m_list_itr, m_list, list_entry) {
 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
 		u16 vsi_handle;
@@ -3492,106 +3476,13 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
 		    is_zero_ether_addr(add))
 			return -EINVAL;
-		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
-			/* Don't overwrite the unicast address */
-			mutex_lock(rule_lock);
-			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
-						&m_list_itr->fltr_info)) {
-				mutex_unlock(rule_lock);
-				return -EEXIST;
-			}
-			mutex_unlock(rule_lock);
-			num_unicast++;
-		} else if (is_multicast_ether_addr(add) ||
-			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
-			m_list_itr->status =
-				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
-						      m_list_itr);
-			if (m_list_itr->status)
-				return m_list_itr->status;
-		}
+
+		m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+							   m_list_itr);
+		if (m_list_itr->status)
+			return m_list_itr->status;
 	}
 
-	mutex_lock(rule_lock);
-	/* Exit if no suitable entries were found for adding bulk switch rule */
-	if (!num_unicast) {
-		status = 0;
-		goto ice_add_mac_exit;
-	}
-
-	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
-
-	/* Allocate switch rule buffer for the bulk update for unicast */
-	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
-	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
-			      GFP_KERNEL);
-	if (!s_rule) {
-		status = -ENOMEM;
-		goto ice_add_mac_exit;
-	}
-
-	r_iter = s_rule;
-	list_for_each_entry(m_list_itr, m_list, list_entry) {
-		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
-		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
-		if (is_unicast_ether_addr(mac_addr)) {
-			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
-					 ice_aqc_opc_add_sw_rules);
-			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
-		}
-	}
-
-	/* Call AQ bulk switch rule update for all unicast addresses */
-	r_iter = s_rule;
-	/* Call AQ switch rule in AQ_MAX chunk */
-	for (total_elem_left = num_unicast; total_elem_left > 0;
-	     total_elem_left -= elem_sent) {
-		struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
-
-		elem_sent = min_t(u8, total_elem_left,
-				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
-		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
-					 elem_sent, ice_aqc_opc_add_sw_rules,
-					 NULL);
-		if (status)
-			goto ice_add_mac_exit;
-		r_iter = (typeof(s_rule))
-			((u8 *)r_iter + (elem_sent * s_rule_size));
-	}
-
-	/* Fill up rule ID based on the value returned from FW */
-	r_iter = s_rule;
-	list_for_each_entry(m_list_itr, m_list, list_entry) {
-		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
-		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-		struct ice_fltr_mgmt_list_entry *fm_entry;
-
-		if (is_unicast_ether_addr(mac_addr)) {
-			f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
-			f_info->fltr_act = ICE_FWD_TO_VSI;
-			/* Create an entry to track this MAC address */
-			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
-						sizeof(*fm_entry), GFP_KERNEL);
-			if (!fm_entry) {
-				status = -ENOMEM;
-				goto ice_add_mac_exit;
-			}
-			fm_entry->fltr_info = *f_info;
-			fm_entry->vsi_count = 1;
-			/* The book keeping entries will get removed when
-			 * base driver calls remove filter AQ command
-			 */
-
-			list_add(&fm_entry->list_entry, rule_head);
-			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
-		}
-	}
-
-ice_add_mac_exit:
-	mutex_unlock(rule_lock);
-	if (s_rule)
-		devm_kfree(ice_hw_to_dev(hw), s_rule);
 	return status;
 }
 
@@ -3978,38 +3869,6 @@ ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
 	return ret;
 }
 
-/**
- * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
- * @hw: pointer to the hardware structure
- * @recp_id: lookup type for which the specified rule needs to be searched
- * @f_info: rule information
- *
- * Helper function to search for a unicast rule entry - this is to be used
- * to remove unicast MAC filter that is not shared with other VSIs on the
- * PF switch.
- *
- * Returns pointer to entry storing the rule if found
- */
-static struct ice_fltr_mgmt_list_entry *
-ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
-			  struct ice_fltr_info *f_info)
-{
-	struct ice_switch_info *sw = hw->switch_info;
-	struct ice_fltr_mgmt_list_entry *list_itr;
-	struct list_head *list_head;
-
-	list_head = &sw->recp_list[recp_id].filt_rules;
-	list_for_each_entry(list_itr, list_head, list_entry) {
-		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
-			    sizeof(f_info->l_data)) &&
-		    f_info->fwd_id.hw_vsi_id ==
-		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
-		    f_info->flag == list_itr->fltr_info.flag)
-			return list_itr;
-	}
-	return NULL;
-}
-
 /**
  * ice_remove_mac - remove a MAC address based filter rule
  * @hw: pointer to the hardware structure
@@ -4026,15 +3885,12 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
 {
 	struct ice_fltr_list_entry *list_itr, *tmp;
-	struct mutex *rule_lock; /* Lock to protect filter rule list */
 
 	if (!m_list)
 		return -EINVAL;
 
-	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
-		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
 		u16 vsi_handle;
 
 		if (l_type != ICE_SW_LKUP_MAC)
@@ -4046,19 +3902,7 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
 
 		list_itr->fltr_info.fwd_id.hw_vsi_id =
 					ice_get_hw_vsi_num(hw, vsi_handle);
-		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
-			/* Don't remove the unicast address that belongs to
-			 * another VSI on the switch, since it is not being
-			 * shared...
-			 */
-			mutex_lock(rule_lock);
-			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
-						       &list_itr->fltr_info)) {
-				mutex_unlock(rule_lock);
-				return -ENOENT;
-			}
-			mutex_unlock(rule_lock);
-		}
+
 		list_itr->status = ice_remove_rule_internal(hw,
 							    ICE_SW_LKUP_MAC,
 							    list_itr);
@@ -295,10 +295,11 @@ struct ice_rx_ring {
 	struct xsk_buff_pool *xsk_pool;
 	struct sk_buff *skb;
 	dma_addr_t dma;			/* physical address of ring */
-#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
 	u64 cached_phctime;
 	u8 dcb_tc;			/* Traffic class of ring */
 	u8 ptp_rx;
+#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
+#define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
 	u8 flags;
 } ____cacheline_internodealigned_in_smp;
 
@@ -885,8 +885,6 @@ struct ice_hw {
 	/* INTRL granularity in 1 us */
 	u8 intrl_gran;
 
-	u8 ucast_shared;	/* true if VSIs can share unicast addr */
-
 #define ICE_PHY_PER_NAC		1
 #define ICE_MAX_QUAD		2
 #define ICE_NUM_QUAD_TYPE	2