Merge branch 'support-symmetric-xor-rss-hash'

Ahmed Zaki says:

====================
Support symmetric-xor RSS hash

Patches 1 and 2 modify the get/set_rxfh ethtool API to take a pointer to a
struct of parameters instead of individual parameters. This allows future
changes to the uAPI-shared struct ethtool_rxfh without changing the
drivers' API.
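
For orientation, the shape of the driver-facing change (visible in every
driver conversion below) is roughly the following. The struct layout is a
sketch assembled from the fields the hunks actually use, not a verbatim
copy of include/linux/ethtool.h:

    /* Sketch only: field types are illustrative, and only the members
     * exercised by the driver conversions below are shown.
     */
    struct ethtool_rxfh_param {
            u8      hfunc;          /* ETH_RSS_HASH_* or ETH_RSS_HASH_NO_CHANGE */
            u32     *indir;         /* indirection table, NULL = no change */
            u8      *key;           /* hash key, NULL = no change */
            u32     rss_context;    /* RSS context ID, 0 = default context */
            u8      input_xfrm;     /* RXH_XFRM_* input transformation bits */
    };

    /* Old driver callbacks:
     *   int (*get_rxfh)(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc);
     *   int (*set_rxfh)(struct net_device *dev, const u32 *indir,
     *                   const u8 *key, const u8 hfunc);
     *
     * New driver callbacks:
     *   int (*get_rxfh)(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
     *   int (*set_rxfh)(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
     *                   struct netlink_ext_ack *extack);
     */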

Patch 3 adds support at the kernel level, allowing the user to set a
symmetric-xor RSS hash for a netdevice via:

    # ethtool -X eth0 hfunc toeplitz symmetric-xor

and to clear the flag via:

    # ethtool -X eth0 hfunc toeplitz

The "symmetric-xor" is set in a new "input_xfrm" field in struct
ethtool_rxfh. Support for the new "symmetric-xor" flag will be later sent
to the "ethtool" user-space tool.

Patch 4 fixes a long-standing bug with the ice hash function register
values. The bug has been benign so far since only the (asymmetric) Toeplitz
hash, whose register value is zero, has been used.
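
(For reference, this fix is the ice_adminq_cmd.h hunk further below: the
hash-type values were defined already shifted into the RSS hash field and
become plain field values. The newly added <linux/bitfield.h> include
suggests they are now encoded with the bitfield helpers at the point of
use; that is an inference from the hunks, not something stated in this
cover letter. The Toeplitz value 0x0 is unaffected either way, which is
why the bug was benign.)

    /* before: the value already carries the field shift */
    #define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ       (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
    /* after: plain field value, shifted into the register where it is used */
    #define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ  0x1U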

Patches 5 and 6 lay some refactoring groundwork. While the first is mainly
cosmetic, the second is needed because there is no room left in the previous
64-bit RSS profile ID for the symmetric attribute introduced in the next
patch.

Finally, patches 7 and 8 add symmetric-xor support to the ice (E800 PFs)
and iAVF drivers.
====================

Link: https://lore.kernel.org/r/20231213003321.605376-1-ahmed.zaki@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>


@ -908,6 +908,9 @@ attribute-sets:
-
name: hkey
type: binary
-
name: input_xfrm
type: u32
-
name: plca
attributes:
@ -1598,6 +1601,7 @@ operations:
- hfunc
- indir
- hkey
- input_xfrm
dump: *rss-get-op
-
name: plca-get-cfg


@ -1774,12 +1774,16 @@ Kernel response contents:
``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
``ETHTOOL_A_RSS_INPUT_XFRM`` u32 RSS input data transformation
===================================== ====== ==========================
ETHTOOL_A_RSS_HFUNC attribute is bitmap indicating the hash function
being used. Current supported options are toeplitz, xor or crc32.
ETHTOOL_A_RSS_INDIR attribute returns RSS indrection table where each byte
ETHTOOL_A_RSS_INDIR attribute returns RSS indirection table where each byte
indicates queue number.
ETHTOOL_A_RSS_INPUT_XFRM attribute is a bitmap indicating the type of
transformation applied to the input protocol fields before given to the RSS
hfunc. Current supported option is symmetric-xor.
PLCA_GET_CFG
============


@ -44,6 +44,21 @@ by masking out the low order seven bits of the computed hash for the
packet (usually a Toeplitz hash), taking this number as a key into the
indirection table and reading the corresponding value.
Some NICs support symmetric RSS hashing where, if the IP (source address,
destination address) and TCP/UDP (source port, destination port) tuples
are swapped, the computed hash is the same. This is beneficial in some
applications that monitor TCP/IP flows (IDS, firewalls, ...etc) and need
both directions of the flow to land on the same Rx queue (and CPU). The
"Symmetric-XOR" is a type of RSS algorithms that achieves this hash
symmetry by XORing the input source and destination fields of the IP
and/or L4 protocols. This, however, results in reduced input entropy and
could potentially be exploited. Specifically, the algorithm XORs the input
as follows::
# (SRC_IP ^ DST_IP, SRC_IP ^ DST_IP, SRC_PORT ^ DST_PORT, SRC_PORT ^ DST_PORT)
The result is then fed to the underlying RSS algorithm.
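
As a standalone illustration (userspace C, not kernel code), the transform
makes the tuple direction-agnostic before any hash function sees it, so
both directions of a flow hash to the same queue:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct hash_input { uint32_t ip[2]; uint16_t port[2]; };

    /* (SRC_IP ^ DST_IP, SRC_IP ^ DST_IP, SRC_PORT ^ DST_PORT, SRC_PORT ^ DST_PORT) */
    static struct hash_input sym_xor(uint32_t sip, uint32_t dip,
                                     uint16_t sport, uint16_t dport)
    {
            struct hash_input in = {
                    .ip   = { sip ^ dip,     sip ^ dip },
                    .port = { sport ^ dport, sport ^ dport },
            };
            return in;
    }

    int main(void)
    {
            /* 10.0.0.1:12345 -> 10.0.0.2:80 and the reverse direction */
            struct hash_input fwd = sym_xor(0x0a000001, 0x0a000002, 12345, 80);
            struct hash_input rev = sym_xor(0x0a000002, 0x0a000001, 80, 12345);

            /* identical inputs mean the underlying hfunc returns the same hash */
            printf("%s\n", memcmp(&fwd, &rev, sizeof(fwd)) ? "different" : "identical");
            return 0;
    }
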
Some advanced NICs allow steering packets to queues based on
programmable filters. For example, webserver bound TCP port 80 packets
can be directed to their own receive queue. Such “n-tuple” filters can


@ -802,15 +802,15 @@ static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
return rc;
}
static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int ena_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct ena_adapter *adapter = netdev_priv(netdev);
enum ena_admin_hash_functions ena_func;
u8 func;
int rc;
rc = ena_indirection_table_get(adapter, indir);
rc = ena_indirection_table_get(adapter, rxfh->indir);
if (rc)
return rc;
@ -825,7 +825,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return rc;
}
rc = ena_com_get_hash_key(adapter->ena_dev, key);
rc = ena_com_get_hash_key(adapter->ena_dev, rxfh->key);
if (rc)
return rc;
@ -842,27 +842,27 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return -EOPNOTSUPP;
}
if (hfunc)
*hfunc = func;
rxfh->hfunc = func;
return 0;
}
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int ena_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_hash_functions func = 0;
int rc;
if (indir) {
rc = ena_indirection_table_set(adapter, indir);
if (rxfh->indir) {
rc = ena_indirection_table_set(adapter, rxfh->indir);
if (rc)
return rc;
}
switch (hfunc) {
switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
func = ena_com_get_current_hash_function(ena_dev);
break;
@ -874,12 +874,12 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
break;
default:
netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
hfunc);
rxfh->hfunc);
return -EOPNOTSUPP;
}
if (key || func) {
rc = ena_com_fill_hash_function(ena_dev, func, key,
if (rxfh->key || func) {
rc = ena_com_fill_hash_function(ena_dev, func, rxfh->key,
ENA_HASH_KEY_SIZE,
0xFFFFFFFF);
if (unlikely(rc)) {


@ -527,47 +527,48 @@ static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
return ARRAY_SIZE(pdata->rss_table);
}
static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int xgbe_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
MAC_RSSDR, DMCH);
rxfh->indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
MAC_RSSDR, DMCH);
}
if (key)
memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
if (rxfh->key)
memcpy(rxfh->key, pdata->rss_key, sizeof(pdata->rss_key));
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int xgbe_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int ret;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "unsupported hash function\n");
return -EOPNOTSUPP;
}
if (indir) {
ret = hw_if->set_rss_lookup_table(pdata, indir);
if (rxfh->indir) {
ret = hw_if->set_rss_lookup_table(pdata, rxfh->indir);
if (ret)
return ret;
}
if (key) {
ret = hw_if->set_rss_hash_key(pdata, key);
if (rxfh->key) {
ret = hw_if->set_rss_hash_key(pdata, rxfh->key);
if (ret)
return ret;
}


@ -447,8 +447,8 @@ static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
return sizeof(cfg->aq_rss.hash_secret_key);
}
static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
static int aq_ethtool_get_rss(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@ -456,21 +456,21 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
cfg = aq_nic_get_cfg(aq_nic);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (rxfh->indir) {
for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
indir[i] = cfg->aq_rss.indirection_table[i];
rxfh->indir[i] = cfg->aq_rss.indirection_table[i];
}
if (key)
memcpy(key, cfg->aq_rss.hash_secret_key,
if (rxfh->key)
memcpy(rxfh->key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
return 0;
}
static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int aq_ethtool_set_rss(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(netdev);
struct aq_nic_cfg_s *cfg;
@ -482,16 +482,17 @@ static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
rss_entries = cfg->aq_rss.indirection_table_size;
/* We do not allow change in unsupported parameters */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
if (indir)
if (rxfh->indir)
for (i = 0; i < rss_entries; i++)
cfg->aq_rss.indirection_table[i] = indir[i];
cfg->aq_rss.indirection_table[i] = rxfh->indir[i];
/* Fill out the rss hash key */
if (key) {
memcpy(cfg->aq_rss.hash_secret_key, key,
if (rxfh->key) {
memcpy(cfg->aq_rss.hash_secret_key, rxfh->key,
sizeof(cfg->aq_rss.hash_secret_key));
err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw,
&cfg->aq_rss);


@ -3486,16 +3486,15 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
static int bnx2x_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!rxfh->indir)
return 0;
/* Get the current configuration of the RSS indirection table */
@ -3511,13 +3510,14 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
* queue.
*/
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
indir[i] = ind_table[i] - bp->fp->cl_id;
rxfh->indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int bnx2x_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
@ -3525,11 +3525,12 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!indir)
if (!rxfh->indir)
return 0;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
@ -3542,7 +3543,7 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
* align the received table to the Client ID of the leading RSS
* queue
*/
bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
bp->rss_conf_obj.ind_table[i] = rxfh->indir[i] + bp->fp->cl_id;
}
if (bp->state == BNX2X_STATE_OPEN)


@ -1333,49 +1333,49 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
return HW_HASH_KEY_SIZE;
}
static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!bp->vnic_info)
return 0;
vnic = &bp->vnic_info[0];
if (indir && bp->rss_indir_tbl) {
if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
indir[i] = bp->rss_indir_tbl[i];
rxfh->indir[i] = bp->rss_indir_tbl[i];
}
if (key && vnic->rss_hash_key)
memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
if (rxfh->key && vnic->rss_hash_key)
memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
}
static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
if (hfunc && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (key)
if (rxfh->key)
return -EOPNOTSUPP;
if (indir) {
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
bp->rss_indir_tbl[i] = indir[i];
bp->rss_indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));


@ -12745,24 +12745,23 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
struct tg3 *tp = netdev_priv(dev);
int i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
indir[i] = tp->rss_ind_tbl[i];
rxfh->indir[i] = tp->rss_ind_tbl[i];
return 0;
}
static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
const u8 hfunc)
static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@ -12770,15 +12769,16 @@ static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!indir)
if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
tp->rss_ind_tbl[i] = indir[i];
tp->rss_ind_tbl[i] = rxfh->indir[i];
if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
return 0;


@ -653,35 +653,36 @@ static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
return nic->rss_info.rss_size;
}
static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
u8 *hfunc)
static int nicvf_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
if (indir) {
if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
indir[idx] = rss->ind_tbl[idx];
rxfh->indir[idx] = rss->ind_tbl[idx];
}
if (hkey)
memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
if (rxfh->key)
memcpy(rxfh->key, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *hkey, const u8 hfunc)
static int nicvf_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!rss->enable) {
@ -690,13 +691,13 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
if (indir) {
if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
rss->ind_tbl[idx] = indir[idx];
rss->ind_tbl[idx] = rxfh->indir[idx];
}
if (hkey) {
memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
if (rxfh->key) {
memcpy(rss->key, rxfh->key, RSS_HASH_KEY_SIZE * sizeof(u64));
nicvf_set_rss_key(nic);
}


@ -1588,22 +1588,23 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
static int get_rss_table(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!p)
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!rxfh->indir)
return 0;
while (n--)
p[n] = pi->rss[n];
rxfh->indir[n] = pi->rss[n];
return 0;
}
static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
const u8 hfunc)
static int set_rss_table(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
@ -1611,16 +1612,17 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!p)
if (!rxfh->indir)
return 0;
/* Interface must be brought up atleast once */
if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i];
pi->rss[i] = rxfh->indir[i];
return cxgb4_write_rss(pi, pi->rss);
}


@ -568,31 +568,32 @@ static u32 enic_get_rxfh_key_size(struct net_device *netdev)
return ENIC_RSS_LEN;
}
static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
u8 *hfunc)
static int enic_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct enic *enic = netdev_priv(netdev);
if (hkey)
memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
if (rxfh->key)
memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *hkey, const u8 hfunc)
static int enic_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
indir)
if (rxfh->indir ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
if (hkey)
memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
if (rxfh->key)
memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);
return __enic_set_rsskey(enic);
}


@ -1271,43 +1271,45 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev)
return RSS_HASH_KEY_LEN;
}
static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
u8 *hfunc)
static int be_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i;
struct rss_info *rss = &adapter->rss_info;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
indir[i] = rss->rss_queue[i];
rxfh->indir[i] = rss->rss_queue[i];
}
if (hkey)
memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
if (rxfh->key)
memcpy(rxfh->key, rss->rss_hkey, RSS_HASH_KEY_LEN);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *hkey, const u8 hfunc)
static int be_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
int rc = 0, i, j;
struct be_adapter *adapter = netdev_priv(netdev);
u8 *hkey = rxfh->key;
u8 rsstable[RSS_INDIR_TABLE_LEN];
/* We do not allow change in unsupported parameters */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
if (rxfh->indir) {
struct be_rx_obj *rxo;
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
j = indir[i];
j = rxfh->indir[i];
rxo = &adapter->rx_obj[j];
rsstable[i] = rxo->rss_id;
adapter->rss_info.rss_queue[i] = j;


@ -690,25 +690,26 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
static int enetc_get_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0, i;
/* return hash function */
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
if (key && hw->port)
if (rxfh->key && hw->port)
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
ENETC_PRSSK(i));
/* return RSS table */
if (indir)
err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
if (rxfh->indir)
err = enetc_get_rss_table(priv->si, rxfh->indir,
priv->si->num_rss);
return err;
}
@ -722,20 +723,22 @@ void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int enetc_set_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0;
/* set hash key, if PF */
if (key && hw->port)
enetc_set_rss_key(hw, key);
if (rxfh->key && hw->port)
enetc_set_rss_key(hw, rxfh->key);
/* set RSS table */
if (indir)
err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
if (rxfh->indir)
err = enetc_set_rss_table(priv->si, rxfh->indir,
priv->si->num_rss);
return err;
}


@ -977,44 +977,44 @@ static u32 fun_get_rxfh_key_size(struct net_device *netdev)
return sizeof(fp->rss_key);
}
static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int fun_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
const struct funeth_priv *fp = netdev_priv(netdev);
if (!fp->rss_cfg)
return -EOPNOTSUPP;
if (indir)
memcpy(indir, fp->indir_table,
if (rxfh->indir)
memcpy(rxfh->indir, fp->indir_table,
sizeof(u32) * fp->indir_table_nentries);
if (key)
memcpy(key, fp->rss_key, sizeof(fp->rss_key));
if (rxfh->key)
memcpy(rxfh->key, fp->rss_key, sizeof(fp->rss_key));
if (hfunc)
*hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
rxfh->hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
return 0;
}
static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int fun_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct funeth_priv *fp = netdev_priv(netdev);
const u32 *rss_indir = indir ? indir : fp->indir_table;
const u8 *rss_key = key ? key : fp->rss_key;
const u32 *rss_indir = rxfh->indir ? rxfh->indir : fp->indir_table;
const u8 *rss_key = rxfh->key ? rxfh->key : fp->rss_key;
enum fun_eth_hash_alg algo;
if (!fp->rss_cfg)
return -EOPNOTSUPP;
if (hfunc == ETH_RSS_HASH_NO_CHANGE)
if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
algo = fp->hash_algo;
else if (hfunc == ETH_RSS_HASH_CRC32)
else if (rxfh->hfunc == ETH_RSS_HASH_CRC32)
algo = FUN_ETH_RSS_ALG_CRC32;
else if (hfunc == ETH_RSS_HASH_TOP)
else if (rxfh->hfunc == ETH_RSS_HASH_TOP)
algo = FUN_ETH_RSS_ALG_TOEPLITZ;
else
return -EINVAL;
@ -1031,10 +1031,10 @@ static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
}
fp->hash_algo = algo;
if (key)
memcpy(fp->rss_key, key, sizeof(fp->rss_key));
if (indir)
memcpy(fp->indir_table, indir,
if (rxfh->key)
memcpy(fp->rss_key, rxfh->key, sizeof(fp->rss_key));
if (rxfh->indir)
memcpy(fp->indir_table, rxfh->indir,
sizeof(u32) * fp->indir_table_nentries);
return 0;
}


@ -1186,7 +1186,7 @@ hns_get_rss_indir_size(struct net_device *netdev)
}
static int
hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
hns_get_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
@ -1199,15 +1199,16 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
ops = priv->ae_handle->dev->ops;
if (!indir)
if (!rxfh->indir)
return 0;
return ops->get_rss(priv->ae_handle, indir, key, hfunc);
return ops->get_rss(priv->ae_handle,
rxfh->indir, rxfh->key, &rxfh->hfunc);
}
static int
hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
const u8 hfunc)
hns_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
@ -1220,12 +1221,14 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
ops = priv->ae_handle->dev->ops;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "Invalid hfunc!\n");
return -EOPNOTSUPP;
}
return ops->set_rss(priv->ae_handle, indir, key, hfunc);
return ops->set_rss(priv->ae_handle,
rxfh->indir, rxfh->key, rxfh->hfunc);
}
static int hns_get_rxnfc(struct net_device *netdev,


@ -941,19 +941,21 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
return ae_dev->dev_specs.rss_ind_tbl_size;
}
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int hns3_get_rss(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
return h->ae_algo->ops->get_rss(h, rxfh->indir, rxfh->key,
&rxfh->hfunc);
}
static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int hns3_set_rss(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
@ -962,19 +964,22 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
return -EOPNOTSUPP;
if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
rxfh->hfunc != ETH_RSS_HASH_TOP) ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP &&
rxfh->hfunc != ETH_RSS_HASH_XOR)) {
netdev_err(netdev, "hash func not supported\n");
return -EOPNOTSUPP;
}
if (!indir) {
if (!rxfh->indir) {
netdev_err(netdev,
"set rss failed for indir is empty\n");
return -EOPNOTSUPP;
}
return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
return h->ae_algo->ops->set_rss(h, rxfh->indir, rxfh->key,
rxfh->hfunc);
}
static int hns3_get_rxnfc(struct net_device *netdev,


@ -1137,7 +1137,7 @@ static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
static int hinic_get_rxfh(struct net_device *netdev,
u32 *indir, u8 *key, u8 *hfunc)
struct ethtool_rxfh_param *rxfh)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
u8 hash_engine_type = 0;
@ -1146,32 +1146,33 @@ static int hinic_get_rxfh(struct net_device *netdev,
if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
if (hfunc) {
err = hinic_rss_get_hash_engine(nic_dev,
nic_dev->rss_tmpl_idx,
&hash_engine_type);
if (err)
return -EFAULT;
err = hinic_rss_get_hash_engine(nic_dev,
nic_dev->rss_tmpl_idx,
&hash_engine_type);
if (err)
return -EFAULT;
*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
}
rxfh->hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
if (indir) {
if (rxfh->indir) {
err = hinic_rss_get_indir_tbl(nic_dev,
nic_dev->rss_tmpl_idx, indir);
nic_dev->rss_tmpl_idx,
rxfh->indir);
if (err)
return -EFAULT;
}
if (key)
if (rxfh->key)
err = hinic_rss_get_template_tbl(nic_dev,
nic_dev->rss_tmpl_idx, key);
nic_dev->rss_tmpl_idx,
rxfh->key);
return err;
}
static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int hinic_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
@ -1179,11 +1180,12 @@ static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) {
if (rxfh->hfunc != ETH_RSS_HASH_TOP &&
rxfh->hfunc != ETH_RSS_HASH_XOR)
return -EOPNOTSUPP;
nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
nic_dev->rss_hash_engine = (rxfh->hfunc == ETH_RSS_HASH_XOR) ?
HINIC_RSS_HASH_ENGINE_TYPE_XOR :
HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
err = hinic_rss_set_hash_engine
@ -1193,7 +1195,7 @@ static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
return -EFAULT;
}
err = __set_rss_rxfh(netdev, indir, key);
err = __set_rss_rxfh(netdev, rxfh->indir, rxfh->key);
return err;
}


@ -1057,16 +1057,16 @@ static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}
static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int fm10k_get_rssh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
u8 *key = rxfh->key;
int i, err;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
err = fm10k_get_reta(netdev, indir);
err = fm10k_get_reta(netdev, rxfh->indir);
if (err || !key)
return err;
@ -1076,23 +1076,25 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int fm10k_set_rssh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
int i, err;
/* We do not allow change in unsupported parameters */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
err = fm10k_set_reta(netdev, indir);
if (err || !key)
err = fm10k_set_reta(netdev, rxfh->indir);
if (err || !rxfh->key)
return err;
for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
u32 rssrk = le32_to_cpu(*(__le32 *)key);
for (i = 0; i < FM10K_RSSRK_SIZE; i++, rxfh->key += 4) {
u32 rssrk = le32_to_cpu(*(__le32 *)rxfh->key);
if (interface->rssrk[i] == rssrk)
continue;


@ -5120,15 +5120,13 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
/**
* i40e_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function
* @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Returns 0 on
* success.
**/
static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int i40e_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@ -5136,13 +5134,12 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
int ret;
u16 i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!indir)
if (!rxfh->indir)
return 0;
seed = key;
seed = rxfh->key;
lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@ -5150,7 +5147,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (ret)
goto out;
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
indir[i] = (u32)(lut[i]);
rxfh->indir[i] = (u32)(lut[i]);
out:
kfree(lut);
@ -5161,15 +5158,15 @@ out:
/**
* i40e_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function to use
* @rxfh: pointer to param struct (indir, key, hfunc)
* @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int i40e_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@ -5177,17 +5174,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
u8 *seed = NULL;
u16 i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (key) {
if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
GFP_KERNEL);
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE);
seed = vsi->rss_hkey_user;
}
if (!vsi->rss_lut_user) {
@ -5197,9 +5195,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
if (indir)
if (rxfh->indir)
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
vsi->rss_lut_user[i] = (u8)(indir[i]);
vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
else
i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
vsi->rss_size);


@ -312,7 +312,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(16)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
@ -414,6 +415,7 @@ struct iavf_adapter {
struct iavf_vsi vsi;
u32 aq_wait_count;
/* RSS stuff */
enum virtchnl_rss_algorithm hfunc;
u64 hena;
u16 rss_key_size;
u16 rss_lut_size;
@ -539,6 +541,7 @@ void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
void iavf_enable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_virtchnl_completion(struct iavf_adapter *adapter,


@ -95,17 +95,21 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
* @rss_cfg: the virtchnl message to be filled with RSS configuration setting
* @packet_hdrs: the RSS configuration protocol header types
* @hash_flds: the RSS configuration protocol hash fields
* @symm: if true, symmetric hash is required
*
* Returns 0 if the RSS configuration virtchnl message is filled successfully
*/
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
u32 packet_hdrs, u64 hash_flds)
u32 packet_hdrs, u64 hash_flds, bool symm)
{
struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
struct virtchnl_proto_hdr *hdr;
rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
if (symm)
rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
else
rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
proto_hdrs->tunnel_level = 0; /* always outer layer */


@ -80,13 +80,14 @@ struct iavf_adv_rss {
u32 packet_hdrs;
u64 hash_flds;
bool symm;
struct virtchnl_rss_cfg cfg_msg;
};
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
u32 packet_hdrs, u64 hash_flds);
u32 packet_hdrs, u64 hash_flds, bool symm);
struct iavf_adv_rss *
iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
void


@ -1529,11 +1529,12 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
/**
* iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
* @cmd: ethtool rxnfc command
* @symm: true if Symmetric Topelitz is set
*
* This function parses the rxnfc command and returns intended hash fields for
* RSS configuration
*/
static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@ -1605,17 +1606,20 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
int count = 50, err = 0;
bool symm = false;
u64 hash_flds;
u32 hdrs;
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
hdrs = iavf_adv_rss_parse_hdrs(cmd);
if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
return -EINVAL;
hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
return -EINVAL;
@ -1623,7 +1627,8 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!rss_new)
return -ENOMEM;
if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
symm)) {
kfree(rss_new);
return -EINVAL;
}
@ -1642,9 +1647,11 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (rss_old) {
if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
err = -EBUSY;
} else if (rss_old->hash_flds != hash_flds) {
} else if (rss_old->hash_flds != hash_flds ||
rss_old->symm != symm) {
rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_old->hash_flds = hash_flds;
rss_old->symm = symm;
memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
sizeof(rss_new->cfg_msg));
} else {
@ -1655,6 +1662,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_new->packet_hdrs = hdrs;
rss_new->hash_flds = hash_flds;
rss_new->symm = symm;
list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
}
spin_unlock_bh(&adapter->adv_rss_lock);
@ -1894,27 +1902,27 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
/**
* iavf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function in use
* @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int iavf_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (key)
memcpy(key, adapter->rss_key, adapter->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
if (indir)
if (rxfh->key)
memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
if (rxfh->indir)
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
indir[i] = (u32)adapter->rss_lut[i];
rxfh->indir[i] = (u32)adapter->rss_lut[i];
return 0;
}
@ -1922,33 +1930,46 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/**
* iavf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function to use
* @rxfh: pointer to param struct (indir, key, hfunc)
* @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int iavf_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
/* Only support toeplitz hash function */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!key && !indir)
if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
} else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
}
if (!rxfh->key && !rxfh->indir)
return 0;
if (key)
memcpy(adapter->rss_key, key, adapter->rss_key_size);
if (rxfh->key)
memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
if (indir) {
if (rxfh->indir) {
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
adapter->rss_lut[i] = (u8)(indir[i]);
adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
}
return iavf_config_rss(adapter);
@ -1957,6 +1978,7 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.cap_rss_sym_xor_supported = true,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,


@ -2166,6 +2166,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_set_rss_lut(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
iavf_set_rss_hfunc(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
iavf_set_promiscuous(adapter);


@ -1141,6 +1141,34 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter)
kfree(vrl);
}
/**
* iavf_set_rss_hfunc
* @adapter: adapter structure
*
* Request the PF to set our RSS Hash function
**/
void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
{
struct virtchnl_rss_hfunc *vrh;
int len = sizeof(*vrh);
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
adapter->current_op);
return;
}
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh)
return;
vrh->vsi_id = adapter->vsi.id;
vrh->rss_algorithm = adapter->hfunc;
adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
kfree(vrh);
}
/**
* iavf_enable_vlan_stripping
* @adapter: adapter structure
@ -2142,6 +2170,19 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
break;
case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
if (adapter->hfunc ==
VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
adapter->hfunc =
VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
else
adapter->hfunc =
VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),


@ -360,6 +360,7 @@ struct ice_vsi {
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
u8 rss_hfunc; /* User configured hash type */
u8 *rss_hkey_user; /* User configured hash keys */
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
@ -920,6 +921,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);


@ -492,10 +492,10 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ 0x0U
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ 0x1U
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR 0x2U
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH 0x3U
u8 q_opt_tc;
#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)


@ -6,6 +6,7 @@
#include <linux/bitfield.h>
#include "ice.h"
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"


@ -2502,27 +2502,15 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
/**
* ice_parse_hash_flds - parses hash fields from RSS hash input
* @nfc: ethtool rxnfc command
* @symm: true if Symmetric Topelitz is set
*
* This function parses the rxnfc command and returns intended
* hash fields for RSS configuration
*/
static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@ -2591,9 +2579,11 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
struct ice_rss_hash_cfg cfg;
struct device *dev;
u64 hashed_flds;
int status;
bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@ -2603,7 +2593,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
hashed_flds = ice_parse_hash_flds(nfc);
symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
hashed_flds = ice_parse_hash_flds(nfc, symm);
if (hashed_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
vsi->vsi_num);
@ -2617,7 +2608,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
cfg.hash_flds = hashed_flds;
cfg.addl_hdrs = hdrs;
cfg.hdr_type = ICE_RSS_ANY_HEADERS;
cfg.symm = symm;
status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
if (status) {
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
vsi->vsi_num, status);
@ -2638,6 +2634,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@ -2656,7 +2653,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return;
}
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
@ -3195,11 +3192,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
/**
* ice_get_rxfh - get the Rx flow hash indirection table
* @netdev: network interface device structure
* @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware.
*/
static int
ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
@ -3230,17 +3234,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
vsi = vsi->tc_map_vsi[rss_context];
}
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
if (!indir)
if (!rxfh->indir)
return 0;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
err = ice_get_rss_key(vsi, key);
err = ice_get_rss_key(vsi, rxfh->key);
if (err)
goto out;
@ -3250,55 +3255,44 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
if (ice_is_adq_active(pf)) {
for (i = 0; i < vsi->rss_table_size; i++)
indir[i] = offset + lut[i] % qcount;
rxfh->indir[i] = offset + lut[i] % qcount;
goto out;
}
for (i = 0; i < vsi->rss_table_size; i++)
indir[i] = lut[i];
rxfh->indir[i] = lut[i];
out:
kfree(lut);
return err;
}
/**
* ice_get_rxfh - get the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function
*
* Reads the indirection table directly from the hardware.
*/
static int
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
}
/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function
* @rxfh: pointer to param struct (indir, key, hfunc)
* @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
static int
ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
const u8 hfunc)
ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (rxfh->rss_context)
return -EOPNOTSUPP;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@ -3312,7 +3306,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EOPNOTSUPP;
}
if (key) {
/* Update the VSI's hash function */
if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
err = ice_set_rss_hfunc(vsi, hfunc);
if (err)
return err;
if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
@ -3320,7 +3322,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
memcpy(vsi->rss_hkey_user, rxfh->key,
ICE_VSIQF_HKEY_ARRAY_SIZE);
err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
if (err)
@ -3335,11 +3338,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
if (indir) {
if (rxfh->indir) {
int i;
for (i = 0; i < vsi->rss_table_size; i++)
vsi->rss_lut_user[i] = (u8)(indir[i]);
vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
} else {
ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
vsi->rss_size);
@ -4217,9 +4220,11 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
.cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@ -4250,7 +4255,6 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,


@ -302,9 +302,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
continue;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
u64 prof_id;
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
u64 prof_id = prof->prof_id[tun];
for (i = 0; i < prof->cnt; i++) {
if (prof->vsi_h[i] != vsi_idx)
@ -362,10 +360,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
return;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
u64 prof_id;
u64 prof_id = prof->prof_id[tun];
int j;
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
for (j = 0; j < prof->cnt; j++) {
u16 vsi_num;
@ -439,14 +436,12 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
struct ice_flow_prof *hw_prof;
struct ice_fd_hw_prof *prof;
u64 prof_id;
int j;
prof = hw->fdir_prof[flow];
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
prof->fdir_seg[tun], TNL_SEG_CNT(tun),
&hw_prof);
false, &hw_prof);
for (j = 0; j < prof->cnt; j++) {
enum ice_flow_priority prio;
u64 entry_h = 0;
@ -454,7 +449,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
prio = ICE_FLOW_PRIO_NORMAL;
err = ice_flow_add_entry(hw, ICE_BLK_FD,
prof_id,
hw_prof->id,
prof->vsi_h[0],
prof->vsi_h[j],
prio, prof->fdir_seg,
@ -464,6 +459,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
flow);
continue;
}
prof->prof_id[tun] = hw_prof->id;
prof->entry_h[j][tun] = entry_h;
}
}
@ -638,7 +634,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
u64 entry1_h = 0;
u64 entry2_h = 0;
bool del_last;
u64 prof_id;
int err;
int idx;
@ -686,23 +681,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* That is the final parameters are 1 header (segment), no
* actions (NULL) and zero actions 0.
*/
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
TNL_SEG_CNT(tun), &prof);
err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
TNL_SEG_CNT(tun), false, &prof);
if (err)
return err;
err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (err)
goto err_prof;
err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (err)
goto err_entry;
hw_prof->fdir_seg[tun] = seg;
hw_prof->prof_id[tun] = prof->id;
hw_prof->entry_h[0][tun] = entry1_h;
hw_prof->entry_h[1][tun] = entry2_h;
hw_prof->vsi_h[0] = main_vsi->idx;
@ -719,7 +714,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
entry1_h = 0;
vsi_h = main_vsi->tc_map_vsi[idx]->idx;
err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
main_vsi->idx, vsi_h,
ICE_FLOW_PRIO_NORMAL, seg,
&entry1_h);
@ -756,7 +751,7 @@ err_unroll:
if (!hw_prof->entry_h[idx][tun])
continue;
ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
hw_prof->entry_h[idx][tun] = 0;
if (del_last)
@ -766,10 +761,10 @@ err_unroll:
hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return err;

View File

@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @blk: HW block
* @fv: field vector to search for
* @masks: masks for FV
* @symm: symmetric setting for RSS flows
* @prof_id: receives the profile ID
*/
static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
struct ice_fv_word *fv, u16 *masks, bool symm,
u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
u8 i;
@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
if (blk == ICE_BLK_RSS && es->symm[i] != symm)
continue;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
}
/**
* ice_write_es - write an extraction sequence to hardware
* ice_write_es - write an extraction sequence and symmetric setting to hardware
* @hw: pointer to the HW struct
* @blk: the block in which to write the extraction sequence
* @prof_id: the profile ID to write
* @fv: pointer to the extraction sequence to write - NULL to clear extraction
* @symm: symmetric setting for RSS profiles
*/
static void
ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
struct ice_fv_word *fv)
struct ice_fv_word *fv, bool symm)
{
u16 off;
@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
memcpy(&hw->blk[blk].es.t[off], fv,
hw->blk[blk].es.fvw * sizeof(*fv));
}
if (blk == ICE_BLK_RSS)
hw->blk[blk].es.symm[prof_id] = symm;
}
/**
@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
ice_write_es(hw, blk, prof_id, NULL);
ice_write_es(hw, blk, prof_id, NULL, false);
ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->symm, 0, es->count * sizeof(*es->symm));
memset(es->written, 0, es->count * sizeof(*es->written));
memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
}
}
@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw)
ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw)
if (!es->ref_count)
goto err;
es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->symm), GFP_KERNEL);
if (!es->symm)
goto err;
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->mask_ena), GFP_KERNEL);
if (!es->mask_ena)
goto err;
prof_id->count = blk_sizes[i].prof_id;
prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
sizeof(*prof_id->id), GFP_KERNEL);
if (!prof_id->id)
goto err;
}
return 0;
@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
* @symm: symmetric setting for RSS profiles
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks)
struct ice_fv_word *es, u16 *masks, bool symm)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
goto err_ice_add_prof;
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
ice_write_es(hw, blk, prof_id, es, symm);
}
ice_prof_inc_ref(hw, blk, prof_id);
@ -3097,7 +3125,7 @@ err_ice_add_prof:
* This will search for a profile tracking ID which was previously added.
* The profile map lock should be held before calling this function.
*/
static struct ice_prof_map *
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;

View File

@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks);
struct ice_fv_word *es, u16 *masks, bool symm);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int

View File

@ -146,6 +146,7 @@ struct ice_es {
u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
u8 *symm; /* symmetric setting per profile (RSS blk)*/
struct mutex prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
@ -304,10 +305,16 @@ struct ice_masks {
struct ice_mask masks[ICE_PROF_MASK_COUNT];
};
struct ice_prof_id {
unsigned long *id;
int count;
};
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
struct ice_prof_id prof_id;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;

View File

@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
#define ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008
/**
* ice_flow_find_prof_conds - Find a profile matching headers and conditions
@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
* @dir: flow direction
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
* @symm: symmetric setting for RSS profiles
* @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
* @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
*/
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
u8 segs_cnt, u16 vsi_handle, u32 conds)
u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds)
{
struct ice_flow_prof *p, *prof = NULL;
@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
!test_bit(vsi_handle, p->vsis))
continue;
/* Check for symmetric settings */
if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) &&
p->symm != symm)
continue;
/* Protocol headers must be checked. Matched fields are
* checked if specified.
*/
@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
* @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
* @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*
* Assumption: the caller has acquired the lock to the profile list
*/
static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, u64 prof_id,
enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
bool symm, struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
struct ice_prof_id *ids;
int status;
u64 prof_id;
u8 i;
if (!prof)
return -EINVAL;
ids = &hw->blk[blk].prof_id;
prof_id = find_first_zero_bit(ids->id, ids->count);
if (prof_id >= ids->count)
return -ENOSPC;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
params->prof->id = prof_id;
params->prof->dir = dir;
params->prof->segs_cnt = segs_cnt;
params->prof->symm = symm;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask);
params->mask, symm);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
INIT_LIST_HEAD(&params->prof->entries);
mutex_init(&params->prof->entries_lock);
set_bit(prof_id, ids->id);
*prof = params->prof;
out:
@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
clear_bit(prof->id, hw->blk[blk].prof_id.id);
list_del(&prof->l_entry);
mutex_destroy(&prof->entries_lock);
devm_kfree(ice_hw_to_dev(hw), prof);
@ -1511,15 +1528,15 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
* @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
* @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*/
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
struct ice_flow_seg_info *segs, u8 segs_cnt,
bool symm, struct ice_flow_prof **prof)
{
int status;
@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
mutex_lock(&hw->fl_profs_locks[blk]);
status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
prof);
status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt,
symm, prof);
if (!status)
list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
/**
* ice_flow_set_rss_seg_info - setup packet segments for RSS
* @segs: pointer to the flow field segment(s)
* @hash_fields: fields to be hashed on for the segment(s)
* @flow_hdr: protocol header fields within a packet segment
* @seg_cnt: segment count
* @cfg: configure parameters
*
* Helper function to extract fields from hash bitmap and use flow
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
static int
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_flow_seg_info *seg;
u64 val;
u8 i;
u16 i;
for_each_set_bit(i, (unsigned long *)&hash_fields,
ICE_FLOW_FIELD_IDX_MAX)
ice_flow_set_fld(segs, (enum ice_flow_field)i,
/* set inner most segment */
seg = &segs[seg_cnt - 1];
for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds,
(u16)ICE_FLOW_FIELD_IDX_MAX)
ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
ICE_FLOW_SET_HDRS(segs, flow_hdr);
ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
/* set outer most header */
if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return -EINVAL;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
@ -1955,6 +1984,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
return status;
}
/**
* ice_get_rss_hdr_type - get a RSS profile's header type
* @prof: RSS flow profile
*/
static enum ice_rss_cfg_hdr_type
ice_get_rss_hdr_type(struct ice_flow_prof *prof)
{
if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
return ICE_RSS_OUTER_HEADERS;
} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
const struct ice_flow_seg_info *s;
s = &prof->segs[ICE_RSS_OUTER_HEADERS];
if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
return ICE_RSS_INNER_HEADERS;
if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
}
return ICE_RSS_ANY_HEADERS;
}
static bool
ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof,
enum ice_rss_cfg_hdr_type hdr_type)
{
return (r->hash.hdr_type == hdr_type &&
r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs);
}
/**
* ice_rem_rss_list - remove RSS configuration from list
* @hw: pointer to the hardware structure
@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *tmp;
/* Search for RSS hash fields associated to the VSI that match the
* hash configurations associated to the flow profile. If found
* remove from the RSS entry list of the VSI context and delete entry.
*/
hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
if (ice_rss_match_prof(r, prof, hdr_type)) {
clear_bit(vsi_handle, r->vsis);
if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
list_del(&r->l_entry);
@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *rss_cfg;
hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
if (ice_rss_match_prof(r, prof, hdr_type)) {
set_bit(vsi_handle, r->vsis);
return 0;
}
@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
if (!rss_cfg)
return -ENOMEM;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
rss_cfg->hash.hdr_type = hdr_type;
rss_cfg->hash.symm = prof->symm;
set_bit(vsi_handle, rss_cfg->vsis);
list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
return 0;
}
#define ICE_FLOW_PROF_HASH_S 0
#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S 32
#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S 63
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
#define ICE_RSS_OUTER_HEADERS 1
#define ICE_RSS_INNER_HEADERS 2
/* Flow profile ID format:
* [0:31] - Packet match fields
* [32:62] - Protocol header
* [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
/**
* ice_rss_config_xor_word - set the HSYMM registers for one input set word
* @hw: pointer to the hardware structure
* @prof_id: RSS hardware profile id
* @src: the FV index used by the protocol's source field
* @dst: the FV index used by the protocol's destination field
*
* Write to the HSYMM register with the index of @src FV the value of the @dst
* FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst]
* while calculating the RSS input set.
*/
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
(((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
static void
ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
{
u32 val, reg, bits_shift;
u8 reg_idx;
reg_idx = src / GLQF_HSYMM_REG_SIZE;
bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3);
val = dst | GLQF_HSYMM_ENABLE_BIT;
reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx));
reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift);
wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg);
}
/**
* ice_rss_config_xor - set the symmetric registers for a profile's protocol
* @hw: pointer to the hardware structure
* @prof_id: RSS hardware profile id
* @src: the FV index used by the protocol's source field
* @dst: the FV index used by the protocol's destination field
* @len: length of the source/destination fields in words
*/
static void
ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
{
int fv_last_word =
ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
int i;
for (i = 0; i < len; i++) {
ice_rss_config_xor_word(hw, prof_id,
/* Yes, field vector in GLQF_HSYMM and
* GLQF_HINSET is inversed!
*/
fv_last_word - (src + i),
fv_last_word - (dst + i));
ice_rss_config_xor_word(hw, prof_id,
fv_last_word - (dst + i),
fv_last_word - (src + i));
}
}
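As a rough illustration of the register indexing above (not part of the patch), the following standalone sketch mirrors the GLQF_HSYMM byte-lane arithmetic with made-up FV indexes; only the index math is taken from the driver code, the values 15/17 are hypothetical stand-ins for an IPv4 SA/DA pair.
/*
 * Editor's sketch: arithmetic check of the GLQF_HSYMM indexing used by
 * ice_rss_config_xor_word()/ice_rss_config_xor(). Indexes count from the
 * end of the field vector, as noted in the driver comment above.
 */
#include <stdint.h>
#include <stdio.h>
#define GLQF_HSYMM_REG_SIZE   4
#define GLQF_HSYMM_ENABLE_BIT (1u << 7)
/* ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1 */
#define FV_LAST_WORD          (48 / 2 - 1)
static void show_xor_word(uint8_t src, uint8_t dst)
{
	uint8_t reg_idx = src / GLQF_HSYMM_REG_SIZE;     /* which HSYMM register */
	uint32_t bits_shift = (src % GLQF_HSYMM_REG_SIZE) << 3; /* byte lane */
	uint32_t val = dst | GLQF_HSYMM_ENABLE_BIT;      /* XOR partner + enable */
	printf("GLQF_HSYMM[prof][%u]: byte %u <- 0x%02x\n",
	       reg_idx, bits_shift / 8, val);
}
int main(void)
{
	uint8_t src = 15, dst = 17;	/* hypothetical IPv4 SA/DA FV indexes */
	show_xor_word(FV_LAST_WORD - src, FV_LAST_WORD - dst);
	show_xor_word(FV_LAST_WORD - dst, FV_LAST_WORD - src);
	return 0;
}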
/**
* ice_rss_set_symm - set the symmetric settings for an RSS profile
* @hw: pointer to the hardware structure
* @prof: pointer to flow profile
*
* The symmetric hash will result from XORing the protocol's fields with
* indexes in GLQF_HSYMM and GLQF_HINSET. This function configures the profile's
* GLQF_HSYMM registers.
*/
static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
{
struct ice_prof_map *map;
u8 prof_id, m;
mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
if (map)
prof_id = map->prof_id;
mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
if (!map)
return;
/* clear to default */
for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++)
wr32(hw, GLQF_HSYMM(prof_id, m), 0);
if (prof->symm) {
struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst;
struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst;
struct ice_flow_seg_xtrct *sctp_src, *sctp_dst;
struct ice_flow_seg_xtrct *tcp_src, *tcp_dst;
struct ice_flow_seg_xtrct *udp_src, *udp_dst;
struct ice_flow_seg_info *seg;
seg = &prof->segs[prof->segs_cnt - 1];
ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* xor IPv4 */
if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
ice_rss_config_xor(hw, prof_id,
ipv4_src->idx, ipv4_dst->idx, 2);
/* xor IPv6 */
if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
ice_rss_config_xor(hw, prof_id,
ipv6_src->idx, ipv6_dst->idx, 8);
/* xor TCP */
if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
ice_rss_config_xor(hw, prof_id,
tcp_src->idx, tcp_dst->idx, 1);
/* xor UDP */
if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
ice_rss_config_xor(hw, prof_id,
udp_src->idx, udp_dst->idx, 1);
/* xor SCTP */
if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
ice_rss_config_xor(hw, prof_id,
sctp_src->idx, sctp_dst->idx, 1);
}
}
/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
* @addl_hdrs: protocol header fields
* @segs_cnt: packet segment count
* @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
u8 segs_cnt;
int status;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
return -EINVAL;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
addl_hdrs);
status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto exit;
/* Search for a flow profile that has matching headers, hash fields
* and has the input VSI associated to it. If found, no further
/* Search for a flow profile that has matching headers, hash fields,
* symm and has the input VSI associated to it. If found, no further
* operations required and exit.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS |
ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof)
goto exit;
@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* the protocol header and new hash field configuration.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof) {
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
if (!status)
@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
}
}
/* Search for a profile that has same match fields only. If this
* exists then associate the VSI to this profile.
/* Search for a profile that has the same match fields and symmetric
* setting. If this exists then associate the VSI to this profile.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (prof) {
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
goto exit;
}
/* Create a new flow profile with generated profile and packet
* segment information.
*/
/* Create a new flow profile with packet segment information. */
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
ICE_FLOW_GEN_PROFID(hashed_flds,
segs[segs_cnt - 1].hdrs,
segs_cnt),
segs, segs_cnt, &prof);
segs, segs_cnt, cfg->symm, &prof);
if (status)
goto exit;
prof->symm = cfg->symm;
ice_rss_set_symm(hw, prof);
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* If association to a new flow profile failed then this profile can
* be removed.
@ -2146,30 +2323,43 @@ exit:
/**
* ice_add_rss_cfg - add an RSS configuration with specified hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
* @addl_hdrs: protocol header fields
* @vsi: VSI to add the RSS configuration to
* @cfg: configure parameters
*
* This function will generate a flow profile based on fields associated with
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
u16 vsi_handle;
int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
if (!vsi)
return -EINVAL;
vsi_handle = vsi->idx;
if (!ice_is_vsi_valid(hw, vsi_handle) ||
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
if (!status)
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
addl_hdrs, ICE_RSS_INNER_HEADERS);
local_cfg = *cfg;
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
} else {
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
if (!status) {
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
status = ice_add_rss_cfg_sync(hw, vsi_handle,
&local_cfg);
}
}
mutex_unlock(&hw->rss_locks);
return status;
@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
* @addl_hdrs: Protocol header fields within a packet segment
* @segs_cnt: packet segment count
* @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
u8 segs_cnt;
int status;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
addl_hdrs);
status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto out;
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
status = -ENOENT;
@ -2233,31 +2423,39 @@ out:
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
* @addl_hdrs: Protocol header fields within a packet segment
* @cfg: configure parameters
*
* This function will lookup the flow profile based on the input
* hash field bitmap, iterate through the profile entry list of
* that profile and find entry associated with input VSI to be
* removed. Calls are made to underlying flow s which will APIs
* removed. Calls are made to underlying flow apis which will in
* turn build or update buffers for RSS XLT1 section.
*/
int __maybe_unused
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
if (!ice_is_vsi_valid(hw, vsi_handle) ||
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
if (!status)
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
addl_hdrs, ICE_RSS_INNER_HEADERS);
local_cfg = *cfg;
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
} else {
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
if (!status) {
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle,
&local_cfg);
}
}
mutex_unlock(&hw->rss_locks);
return status;
@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @vsi: VF's VSI
* @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
{
struct ice_rss_hash_cfg hcfg;
u16 vsi_handle;
int status = 0;
u64 hash_flds;
if (!vsi)
return -EINVAL;
vsi_handle = vsi->idx;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (rss_hash == ICE_HASH_INVALID)
return -EIO;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE);
hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
hcfg.hash_flds = rss_hash;
hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
hcfg.symm = false;
status = ice_add_rss_cfg(hw, vsi, &hcfg);
if (status)
break;
}
@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
return status;
}
static bool rss_cfg_symm_valid(u64 hfld)
{
return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^
!!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) ||
(!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^
!!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) ||
(!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^
!!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) ||
(!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^
!!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) ||
(!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^
!!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)));
}
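The check above only accepts hash field sets where source and destination come in pairs; a minimal standalone sketch of that pairing rule (editorial, with stand-in bit values rather than the real ICE_FLOW_HASH_FLD_* positions) is shown below.
/*
 * Editor's sketch: symmetric XOR needs both halves of each src/dst tuple in
 * the hash input set, otherwise the swapped flow would hash differently.
 */
#include <stdbool.h>
#include <stdio.h>
#define F_IPV4_SA (1ull << 0)
#define F_IPV4_DA (1ull << 1)
#define F_TCP_SP  (1ull << 2)
#define F_TCP_DP  (1ull << 3)
static bool symm_valid(unsigned long long hfld)
{
	return !((!!(hfld & F_IPV4_SA) ^ !!(hfld & F_IPV4_DA)) ||
		 (!!(hfld & F_TCP_SP) ^ !!(hfld & F_TCP_DP)));
}
int main(void)
{
	/* 1: both IP and both TCP halves present */
	printf("%d\n", symm_valid(F_IPV4_SA | F_IPV4_DA | F_TCP_SP | F_TCP_DP));
	/* 0: TCP destination port missing from the input set */
	printf("%d\n", symm_valid(F_IPV4_SA | F_IPV4_DA | F_TCP_SP));
	return 0;
}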
/**
* ice_set_rss_cfg_symm - set symmetry for all VSI's RSS configurations
* @hw: pointer to the hardware structure
* @vsi: VSI to set/unset Symmetric RSS
* @symm: TRUE to set Symmetric RSS hashing
*/
int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm)
{
struct ice_rss_hash_cfg local;
struct ice_rss_cfg *r, *tmp;
u16 vsi_handle = vsi->idx;
int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
mutex_lock(&hw->rss_locks);
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) {
if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) {
local = r->hash;
local.symm = symm;
if (symm && !rss_cfg_symm_valid(r->hash.hash_flds))
continue;
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local);
if (status)
break;
}
}
mutex_unlock(&hw->rss_locks);
return status;
}
/**
* ice_replay_rss_cfg - replay RSS configurations associated with VSI
* @hw: pointer to the hardware structure
@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
if (test_bit(vsi_handle, r->vsis)) {
status = ice_add_rss_cfg_sync(hw, vsi_handle,
r->hashed_flds,
r->packet_hdr,
ICE_RSS_OUTER_HEADERS);
if (status)
break;
status = ice_add_rss_cfg_sync(hw, vsi_handle,
r->hashed_flds,
r->packet_hdr,
ICE_RSS_INNER_HEADERS);
status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
if (status)
break;
}
@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hdrs: protocol header type
* @symm: whether the RSS is symmetric (bool, output)
*
* This function will return the match fields of the first instance of flow
* profile having the given header types and containing input VSI
*/
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm)
{
u64 rss_hash = ICE_HASH_INVALID;
struct ice_rss_cfg *r;
@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
r->packet_hdr == hdrs) {
rss_hash = r->hashed_flds;
r->hash.addl_hdrs == hdrs) {
rss_hash = r->hash.hash_flds;
*symm = r->hash.symm;
break;
}
mutex_unlock(&hw->rss_locks);

View File

@ -34,6 +34,8 @@
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_FLOW_HASH_GTP_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
@ -227,6 +229,19 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_MAX
};
#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
@ -279,6 +294,24 @@ enum ice_flow_avf_hdr_field {
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
/* take inner headers as inputset for packet with outer ipv4. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
/* take inner headers as inputset for packet with outer ipv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
ICE_RSS_ANY_HEADERS
};
struct ice_rss_hash_cfg {
u32 addl_hdrs; /* protocol header fields */
u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
bool symm; /* symmetric or asymmetric hash */
};
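For reference, a symmetric TCP-over-IPv4 configuration could be filled in as below; this is an illustrative fragment assuming the defines from this header (the variable name is hypothetical and the snippet is not part of the patch).
/* Editor's sketch: one possible ice_rss_hash_cfg instance */
static const struct ice_rss_hash_cfg tcp4_symm_cfg = {
	.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
	.hash_flds = ICE_HASH_TCP_IPV4,
	.hdr_type  = ICE_RSS_ANY_HEADERS,
	.symm      = true,
};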
enum ice_flow_dir {
ICE_FLOW_RX = 0x02,
};
@ -289,8 +322,10 @@ enum ice_flow_priority {
ICE_FLOW_PRIO_HIGH
};
#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@ -372,20 +407,21 @@ struct ice_flow_prof {
/* software VSI handles referenced by this flow profile */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
bool symm; /* Symmetric Hash for RSS */
};
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
u64 hashed_flds;
u32 packet_hdr;
struct ice_rss_hash_cfg hash;
};
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof);
struct ice_flow_seg_info *segs, u8 segs_cnt,
bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
@ -401,13 +437,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm);
int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
const struct ice_rss_hash_cfg *cfg);
int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
#endif /* _ICE_FLOW_H_ */

View File

@ -404,6 +404,10 @@
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512))
#define GLQF_HSYMM_REG_SIZE 4
#define GLQF_HSYMM_REG_PER_PROF 6
#define GLQF_HSYMM_ENABLE_BIT BIT(7)
#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)

View File

@ -1191,12 +1191,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_VF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
dev_dbg(dev, "Unsupported VSI type %s\n",
@ -1204,9 +1202,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
return;
}
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
(hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
vsi->rss_hfunc = hash_type;
ctxt->info.q_opt_rss =
FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
}
static void
@ -1605,12 +1606,44 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
}
static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
/* configure RSS for IPv4 with input set IP src/dst */
{ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
/* configure RSS for IPv6 with input set IPv6 src/dst */
{ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
/* configure RSS for sctp4 with input set IP src/dst - only support
* RSS on SCTPv4 on outer headers (non-tunneled)
*/
{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
/* configure RSS for sctp6 with input set IPv6 src/dst - only support
* RSS on SCTPv6 on outer headers (non-tunneled)
*/
{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
{ICE_FLOW_SEG_HDR_ESP,
ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
};
/**
* ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
* @vsi: VSI to be configured
@ -1624,11 +1657,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
*/
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
u16 vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct device *dev;
int status;
u32 i;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@ -1636,67 +1670,15 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
vsi_num);
return;
}
/* configure RSS for IPv4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
vsi_num, status);
for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
vsi_num, status);
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
vsi_num, status);
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
vsi_num, status);
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
vsi_num, status);
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
ICE_FLOW_SEG_HDR_ESP);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
vsi_num, status);
status = ice_add_rss_cfg(hw, vsi, cfg);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
cfg->addl_hdrs, cfg->hash_flds,
cfg->hdr_type, cfg->symm);
}
}
/**

View File

@ -7712,6 +7712,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
return status;
}
/**
* ice_set_rss_hfunc - Set RSS HASH function
* @vsi: Pointer to VSI structure
* @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
*
* Returns 0 on success, negative on failure
*/
int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctx;
bool symm;
int err;
if (hfunc == vsi->rss_hfunc)
return 0;
if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
return -EOPNOTSUPP;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
ctx->info.q_opt_rss = vsi->info.q_opt_rss;
ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
ctx->info.q_opt_rss |=
FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
ctx->info.q_opt_tc = vsi->info.q_opt_tc;
ctx->info.q_opt_flags = vsi->info.q_opt_rss;
err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
vsi->vsi_num, err);
} else {
vsi->info.q_opt_rss = ctx->info.q_opt_rss;
vsi->rss_hfunc = hfunc;
netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
"Symmetric " : "");
}
kfree(ctx);
if (err)
return err;
/* Fix the symmetry setting for all existing RSS configurations */
symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
return ice_set_rss_cfg_symm(hw, vsi, symm);
}
/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
@ -8147,13 +8200,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
enum ice_flow_priority prio;
u64 prof_id;
/* add this VSI to FDir profile for this flow */
prio = ICE_FLOW_PRIO_NORMAL;
prof = hw->fdir_prof[flow];
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
status = ice_flow_add_entry(hw, ICE_BLK_FD,
prof->prof_id[tun],
prof->vsi_h[0], vsi->idx,
prio, prof->fdir_seg[tun],
&entry_h);

View File

@ -246,6 +246,7 @@ struct ice_fd_hw_prof {
int cnt;
u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
u64 prof_id[ICE_FD_HW_SEG_MAX];
};
/* Common HW capabilities for SW use */

View File

@ -689,9 +689,7 @@ out:
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
* @rss_cfg: pointer to the virtchnl RSS cfg
* @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
* to configure
* @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
* @hash_cfg: pointer to the HW hash configuration
*
* Return true if all the protocol header and hash fields in the RSS cfg could
* be parsed, else return false
@ -699,13 +697,23 @@ out:
* This function parses the virtchnl RSS cfg to be the intended
* hash fields and the intended header for RSS configuration
*/
static bool
ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
u32 *addl_hdrs, u64 *hash_flds)
static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
struct virtchnl_rss_cfg *rss_cfg,
struct ice_rss_hash_cfg *hash_cfg)
{
const struct ice_vc_hash_field_match_type *hf_list;
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
u32 *addl_hdrs = &hash_cfg->addl_hdrs;
u64 *hash_flds = &hash_cfg->hash_flds;
/* set outer layer RSS as default */
hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
hash_cfg->symm = true;
else
hash_cfg->symm = false;
hf_list = ice_vc_hash_field_list;
hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
@ -823,8 +831,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
int status;
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@ -832,11 +840,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
goto error_param;
}
ctx->info.q_opt_rss = ((lut_type <<
ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
(hash_type &
ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
ctx->info.q_opt_rss =
FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
/* Preserve existing queueing option setting */
ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
@ -858,18 +864,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
kfree(ctx);
} else {
u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
u64 hash_flds = ICE_HASH_INVALID;
struct ice_rss_hash_cfg cfg;
if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
&hash_flds)) {
/* Only check for none raw pattern case */
if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
cfg.hash_flds = ICE_HASH_INVALID;
cfg.hdr_type = ICE_RSS_ANY_HEADERS;
if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add) {
if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
addl_hdrs)) {
if (ice_add_rss_cfg(hw, vsi, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
vsi->vsi_num, v_ret);
@ -877,8 +889,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
} else {
int status;
status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
addl_hdrs);
status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
/* We just ignore -ENOENT, because if two configurations
* share the same profile remove one of them actually
* removes both, since the profile is deleted.
@ -988,6 +999,51 @@ error_param:
NULL, 0);
}
/**
* ice_vc_config_rss_hfunc
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
* Configure the VF's RSS Hash function
*/
static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
if (ice_set_rss_hfunc(vsi, hfunc))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
NULL, 0);
}
/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
@ -2634,7 +2690,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
}
if (vrh->hena) {
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena);
status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
v_ret = ice_err_to_virt_err(status);
}
@ -3755,6 +3811,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
.config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
.add_vlan_msg = ice_vc_add_vlan_msg,
@ -3884,6 +3941,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
.config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg,
@ -4066,6 +4124,9 @@ error_handler:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
err = ops->config_rss_lut(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
err = ops->config_rss_hfunc(vf, msg);
break;
case VIRTCHNL_OP_GET_STATS:
err = ops->get_stats_msg(vf, msg);
break;

View File

@ -32,6 +32,7 @@ struct ice_virtchnl_ops {
int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg);
int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);

View File

@ -68,6 +68,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */

View File

@ -10,19 +10,6 @@
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
#define ICE_FLOW_PROF_TYPE_S 0
#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S 32
#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
/* Flow profile ID format:
* [0:31] - flow type, flow + tun_offs
* [32:63] - VSI index
*/
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
(((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
@ -493,6 +480,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
return;
vf_prof = fdir->fdir_prof[flow];
prof_id = vf_prof->prof_id[tun];
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
@ -503,9 +491,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
if (!fdir->prof_entry_cnt[flow][tun])
return;
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
if (vf_prof->entry_h[i][tun]) {
u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
@ -647,7 +632,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_hw *hw;
u64 entry1_h = 0;
u64 entry2_h = 0;
u64 prof_id;
int ret;
pf = vf->pf;
@ -681,18 +665,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
ice_vc_fdir_rem_prof(vf, flow, tun);
}
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
tun ? ICE_FLTR_PTYPE_MAX : 0);
ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
tun + 1, &prof);
ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
tun + 1, false, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (ret) {
@ -701,7 +682,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
goto err_prof;
}
ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (ret) {
@ -725,14 +706,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
vf_prof->cnt++;
fdir->prof_entry_cnt[flow][tun]++;
vf_prof->prof_id[tun] = prof->id;
return 0;
err_entry_1:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
return ret;
}

View File

@ -75,14 +75,12 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
/**
* idpf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function in use
* @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
*/
static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int idpf_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@ -103,15 +101,14 @@ static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (key)
memcpy(key, rss_data->rss_key, rss_data->rss_key_size);
if (rxfh->key)
memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);
if (indir) {
if (rxfh->indir) {
for (i = 0; i < rss_data->rss_lut_size; i++)
indir[i] = rss_data->rss_lut[i];
rxfh->indir[i] = rss_data->rss_lut[i];
}
unlock_mutex:
@ -123,15 +120,15 @@ unlock_mutex:
/**
* idpf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
* @hfunc: hash function to use
* @rxfh: pointer to param struct (indir, key, hfunc)
* @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
*/
static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int idpf_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@ -154,17 +151,18 @@ static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
err = -EOPNOTSUPP;
goto unlock_mutex;
}
if (key)
memcpy(rss_data->rss_key, key, rss_data->rss_key_size);
if (rxfh->key)
memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
if (indir) {
if (rxfh->indir) {
for (lut = 0; lut < rss_data->rss_lut_size; lut++)
rss_data->rss_lut[lut] = indir[lut];
rss_data->rss_lut[lut] = rxfh->indir[lut];
}
err = idpf_config_rss(vport);

View File

@ -3280,18 +3280,17 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int igb_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!rxfh->indir)
return 0;
for (i = 0; i < IGB_RETA_SIZE; i++)
indir[i] = adapter->rss_indir_tbl[i];
rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
@ -3331,8 +3330,9 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int igb_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@ -3340,10 +3340,11 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
u32 num_queues;
/* We do not allow change in unsupported parameters */
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!indir)
if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
@ -3360,12 +3361,12 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < IGB_RETA_SIZE; i++)
if (indir[i] >= num_queues)
if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGB_RETA_SIZE; i++)
adapter->rss_indir_tbl[i] = indir[i];
adapter->rss_indir_tbl[i] = rxfh->indir[i];
igb_write_rss_indir_tbl(adapter);

View File

@ -1426,45 +1426,46 @@ static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev)
return IGC_RETA_SIZE;
}
static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int igc_ethtool_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!rxfh->indir)
return 0;
for (i = 0; i < IGC_RETA_SIZE; i++)
indir[i] = adapter->rss_indir_tbl[i];
rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int igc_ethtool_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 num_queues;
int i;
/* We do not allow change in unsupported parameters */
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!indir)
if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
/* Verify user input. */
for (i = 0; i < IGC_RETA_SIZE; i++)
if (indir[i] >= num_queues)
if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGC_RETA_SIZE; i++)
adapter->rss_indir_tbl[i] = indir[i];
adapter->rss_indir_tbl[i] = rxfh->indir[i];
igc_write_rss_indir_tbl(adapter);

View File

@ -3107,35 +3107,37 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int ixgbe_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (indir)
ixgbe_get_reta(adapter, indir);
if (rxfh->indir)
ixgbe_get_reta(adapter, rxfh->indir);
if (key)
memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
if (rxfh->key)
memcpy(rxfh->key, adapter->rss_key,
ixgbe_get_rxfh_key_size(netdev));
return 0;
}
static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int ixgbe_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
if (indir) {
if (rxfh->indir) {
int max_queues = min_t(int, adapter->num_rx_queues,
ixgbe_rss_indir_tbl_max(adapter));
@ -3146,18 +3148,19 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < reta_entries; i++)
if (indir[i] >= max_queues)
if (rxfh->indir[i] >= max_queues)
return -EINVAL;
for (i = 0; i < reta_entries; i++)
adapter->rss_indir_tbl[i] = indir[i];
adapter->rss_indir_tbl[i] = rxfh->indir[i];
ixgbe_store_reta(adapter);
}
/* Fill out the rss hash key */
if (key) {
memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
if (rxfh->key) {
memcpy(adapter->rss_key, rxfh->key,
ixgbe_get_rxfh_key_size(netdev));
ixgbe_store_key(adapter);
}

View File

@ -897,40 +897,41 @@ static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
return IXGBEVF_RSS_HASH_KEY_SIZE;
}
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int ixgbevf_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
int err = 0;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
if (key)
memcpy(key, adapter->rss_key,
if (rxfh->key)
memcpy(rxfh->key, adapter->rss_key,
ixgbevf_get_rxfh_key_size(netdev));
if (indir) {
if (rxfh->indir) {
int i;
for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
indir[i] = adapter->rss_indir_tbl[i];
rxfh->indir[i] = adapter->rss_indir_tbl[i];
}
} else {
/* If neither indirection table nor hash key was requested
* - just return a success avoiding taking any locks.
*/
if (!indir && !key)
if (!rxfh->indir && !rxfh->key)
return 0;
spin_lock_bh(&adapter->mbx_lock);
if (indir)
err = ixgbevf_get_reta_locked(&adapter->hw, indir,
if (rxfh->indir)
err = ixgbevf_get_reta_locked(&adapter->hw,
rxfh->indir,
adapter->num_rx_queues);
if (!err && key)
err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
if (!err && rxfh->key)
err = ixgbevf_get_rss_key_locked(&adapter->hw,
rxfh->key);
spin_unlock_bh(&adapter->mbx_lock);
}

View File

@ -5030,8 +5030,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
return 0;
}
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int mvneta_ethtool_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@ -5042,20 +5043,21 @@ static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed
* and no change in any of the unsupported parameters
*/
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!indir)
if (!rxfh->indir)
return 0;
memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);
return mvneta_config_rss(pp);
}
static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
static int mvneta_ethtool_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct mvneta_port *pp = netdev_priv(dev);
@ -5063,13 +5065,12 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
if (pp->neta_armada3700)
return -EOPNOTSUPP;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!indir)
if (!rxfh->indir)
return 0;
memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
return 0;
}

View File

@ -5634,49 +5634,11 @@ static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (indir)
ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
return ret;
}
static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
return -EOPNOTSUPP;
if (key)
return -EOPNOTSUPP;
if (indir)
ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
return ret;
}
static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct mvpp2_port *port = netdev_priv(dev);
u32 rss_context = rxfh->rss_context;
int ret = 0;
if (!mvpp22_rss_is_supported(port))
@ -5684,33 +5646,34 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
if (rss_context >= MVPP22_N_RSS_TABLES)
return -EINVAL;
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
rxfh->hfunc = ETH_RSS_HASH_CRC32;
if (indir)
ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
if (rxfh->indir)
ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
rxfh->indir);
return ret;
}
static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
const u32 *indir, const u8 *key,
const u8 hfunc, u32 *rss_context,
bool delete)
static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
int ret;
u32 *rss_context = &rxfh->rss_context;
int ret = 0;
if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_CRC32)
return -EOPNOTSUPP;
if (key)
if (rxfh->key)
return -EOPNOTSUPP;
if (delete)
if (*rss_context && rxfh->rss_delete)
return mvpp22_port_rss_ctx_delete(port, *rss_context);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
@ -5719,8 +5682,13 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
return ret;
}
return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
if (rxfh->indir)
ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
rxfh->indir);
return ret;
}
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@ -5740,6 +5708,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@ -5762,8 +5731,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
.get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
.set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that

View File

@ -835,21 +835,26 @@ static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
return 0;
}
/* RSS context configuration */
static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
const u8 *hkey, const u8 hfunc,
u32 *rss_context, bool delete)
/* Configure RSS table and hash key */
static int otx2_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
int ret, idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
*rss_context >= MAX_RSS_GROUPS)
if (rxfh->rss_context)
rss_context = rxfh->rss_context;
if (rss_context != ETH_RXFH_CONTEXT_ALLOC &&
rss_context >= MAX_RSS_GROUPS)
return -EINVAL;
rss = &pfvf->hw.rss_info;
@ -859,40 +864,45 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
return -EIO;
}
if (hkey) {
memcpy(rss->key, hkey, sizeof(rss->key));
if (rxfh->key) {
memcpy(rss->key, rxfh->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
}
if (delete)
return otx2_rss_ctx_delete(pfvf, *rss_context);
if (rxfh->rss_delete)
return otx2_rss_ctx_delete(pfvf, rss_context);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
ret = otx2_rss_ctx_create(pfvf, rss_context);
if (rss_context == ETH_RXFH_CONTEXT_ALLOC) {
ret = otx2_rss_ctx_create(pfvf, &rss_context);
rxfh->rss_context = rss_context;
if (ret)
return ret;
}
if (indir) {
rss_ctx = rss->rss_ctx[*rss_context];
if (rxfh->indir) {
rss_ctx = rss->rss_ctx[rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
rss_ctx->ind_tbl[idx] = indir[idx];
rss_ctx->ind_tbl[idx] = rxfh->indir[idx];
}
otx2_set_rss_table(pfvf, *rss_context);
otx2_set_rss_table(pfvf, rss_context);
return 0;
}
static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
u8 *hkey, u8 *hfunc, u32 rss_context)
/* Get RSS configuration */
static int otx2_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
u32 *indir = rxfh->indir;
int idx, rx_queues;
rss = &pfvf->hw.rss_info;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->rss_context)
rss_context = rxfh->rss_context;
if (!indir)
return 0;
@ -914,30 +924,12 @@ static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
for (idx = 0; idx < rss->rss_size; idx++)
indir[idx] = rss_ctx->ind_tbl[idx];
}
if (hkey)
memcpy(hkey, rss->key, sizeof(rss->key));
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
return 0;
}
/* Get RSS configuration */
static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
u8 *hkey, u8 *hfunc)
{
return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
DEFAULT_RSS_CONTEXT_GROUP);
}
/* Configure RSS table and hash key */
static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *hkey, const u8 hfunc)
{
u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
}
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@ -1318,6 +1310,7 @@ static void otx2_get_fec_stats(struct net_device *netdev,
}
static const struct ethtool_ops otx2_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@ -1340,8 +1333,6 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
.get_rxfh_context = otx2_get_rxfh_context,
.set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@ -1441,6 +1432,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@ -1459,8 +1451,6 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
.get_rxfh_context = otx2_get_rxfh_context,
.set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,

View File

@ -1258,8 +1258,8 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
return -EINVAL;
}
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
u8 *hfunc)
static int mlx4_en_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
@ -1269,19 +1269,19 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
rss_rings = rounddown_pow_of_two(rss_rings);
for (i = 0; i < n; i++) {
if (!ring_index)
if (!rxfh->indir)
break;
ring_index[i] = i % rss_rings;
rxfh->indir[i] = i % rss_rings;
}
if (key)
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc)
*hfunc = priv->rss_hash_fn;
if (rxfh->key)
memcpy(rxfh->key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
rxfh->hfunc = priv->rss_hash_fn;
return 0;
}
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
const u8 *key, const u8 hfunc)
static int mlx4_en_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
@ -1295,12 +1295,12 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
* between rings
*/
for (i = 0; i < n; i++) {
if (!ring_index)
if (!rxfh->indir)
break;
if (i > 0 && !ring_index[i] && !rss_rings)
if (i > 0 && !rxfh->indir[i] && !rss_rings)
rss_rings = i;
if (ring_index[i] != (i % (rss_rings ?: n)))
if (rxfh->indir[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
@ -1311,8 +1311,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
if (!is_power_of_2(rss_rings))
return -EINVAL;
if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
err = mlx4_en_check_rxfh_func(dev, hfunc);
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) {
err = mlx4_en_check_rxfh_func(dev, rxfh->hfunc);
if (err)
return err;
}
@ -1323,12 +1323,12 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
mlx4_en_stop_port(dev, 1);
}
if (ring_index)
if (rxfh->indir)
priv->prof->rss_rings = rss_rings;
if (key)
memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc != ETH_RSS_HASH_NO_CHANGE)
priv->rss_hash_fn = hfunc;
if (rxfh->key)
memcpy(priv->rss_key, rxfh->key, MLX4_EN_RSS_KEY_SIZE);
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE)
priv->rss_hash_fn = rxfh->hfunc;
if (port_up) {
err = mlx4_en_start_port(dev);

View File

@ -1175,9 +1175,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
const u8 hfunc);
int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,

View File

@ -1262,27 +1262,29 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc);
err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
rxfh->indir, rxfh->key, &rxfh->hfunc);
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc,
u32 *rss_context, bool delete)
int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
int err;
mutex_lock(&priv->state_lock);
if (delete) {
if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
@ -1295,7 +1297,8 @@ static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
goto unlock;
}
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key,
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
rxfh->indir, rxfh->key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
unlock:
@ -1303,25 +1306,6 @@ unlock:
return err;
}
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0);
}
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
mutex_unlock(&priv->state_lock);
return err;
}
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@ -2398,6 +2382,7 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
@ -2420,8 +2405,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxfh_context = mlx5e_get_rxfh_context,
.set_rxfh_context = mlx5e_set_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,

View File

@ -934,11 +934,11 @@ static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev)
}
static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
u32 *indir, u8 *key, u8 *hfunc)
struct ethtool_rxfh_param *rxfh)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
if (indir) {
if (rxfh->indir) {
int dw_index;
int byte_index = 0;
@ -947,17 +947,17 @@ static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
lan743x_csr_read(adapter, RFE_INDX(dw_index));
byte_index = dw_index << 2;
indir[byte_index + 0] =
rxfh->indir[byte_index + 0] =
((four_entries >> 0) & 0x000000FF);
indir[byte_index + 1] =
rxfh->indir[byte_index + 1] =
((four_entries >> 8) & 0x000000FF);
indir[byte_index + 2] =
rxfh->indir[byte_index + 2] =
((four_entries >> 16) & 0x000000FF);
indir[byte_index + 3] =
rxfh->indir[byte_index + 3] =
((four_entries >> 24) & 0x000000FF);
}
}
if (key) {
if (rxfh->key) {
int dword_index;
int byte_index = 0;
@ -967,28 +967,30 @@ static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
RFE_HASH_KEY(dword_index));
byte_index = dword_index << 2;
key[byte_index + 0] =
rxfh->key[byte_index + 0] =
((four_entries >> 0) & 0x000000FF);
key[byte_index + 1] =
rxfh->key[byte_index + 1] =
((four_entries >> 8) & 0x000000FF);
key[byte_index + 2] =
rxfh->key[byte_index + 2] =
((four_entries >> 16) & 0x000000FF);
key[byte_index + 3] =
rxfh->key[byte_index + 3] =
((four_entries >> 24) & 0x000000FF);
}
}
if (hfunc)
(*hfunc) = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
const u32 *indir, const u8 *key,
const u8 hfunc)
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
u32 *indir = rxfh->indir;
u8 *key = rxfh->key;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {

View File

@ -248,28 +248,28 @@ static u32 mana_rss_indir_size(struct net_device *ndev)
return MANA_INDIRECT_TABLE_SIZE;
}
static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
static int mana_get_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct mana_port_context *apc = netdev_priv(ndev);
int i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
indir[i] = apc->indir_table[i];
rxfh->indir[i] = apc->indir_table[i];
}
if (key)
memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);
if (rxfh->key)
memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);
return 0;
}
static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int mana_set_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
@ -280,25 +280,26 @@ static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
if (!apc->port_is_up)
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
if (indir[i] >= apc->num_queues)
if (rxfh->indir[i] >= apc->num_queues)
return -EINVAL;
update_table = true;
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
save_table[i] = apc->indir_table[i];
apc->indir_table[i] = indir[i];
apc->indir_table[i] = rxfh->indir[i];
}
}
if (key) {
if (rxfh->key) {
update_hash = true;
memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
}
err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

View File

@ -1794,8 +1794,8 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
return nfp_net_rss_key_sz(nn);
}
static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int nfp_net_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
@ -1803,41 +1803,41 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
if (indir)
if (rxfh->indir)
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
indir[i] = nn->rss_itbl[i];
if (key)
memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
if (hfunc) {
*hfunc = nn->rss_hfunc;
if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
*hfunc = ETH_RSS_HASH_UNKNOWN;
}
rxfh->indir[i] = nn->rss_itbl[i];
if (rxfh->key)
memcpy(rxfh->key, nn->rss_key, nfp_net_rss_key_sz(nn));
rxfh->hfunc = nn->rss_hfunc;
if (rxfh->hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
rxfh->hfunc = ETH_RSS_HASH_UNKNOWN;
return 0;
}
static int nfp_net_set_rxfh(struct net_device *netdev,
const u32 *indir, const u8 *key,
const u8 hfunc)
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
!(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
!(rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE ||
rxfh->hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
if (!key && !indir)
if (!rxfh->key && !rxfh->indir)
return 0;
if (key) {
memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
if (rxfh->key) {
memcpy(nn->rss_key, rxfh->key, nfp_net_rss_key_sz(nn));
nfp_net_rss_write_key(nn);
}
if (indir) {
if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
nn->rss_itbl[i] = indir[i];
nn->rss_itbl[i] = rxfh->indir[i];
nfp_net_rss_write_itbl(nn);
}

View File

@ -823,36 +823,38 @@ static u32 ionic_get_rxfh_key_size(struct net_device *netdev)
return IONIC_RSS_HASH_KEY_SIZE;
}
static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
static int ionic_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
struct ionic_lif *lif = netdev_priv(netdev);
unsigned int i, tbl_sz;
if (indir) {
if (rxfh->indir) {
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
for (i = 0; i < tbl_sz; i++)
indir[i] = lif->rss_ind_tbl[i];
rxfh->indir[i] = lif->rss_ind_tbl[i];
}
if (key)
memcpy(key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
if (rxfh->key)
memcpy(rxfh->key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int ionic_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
return ionic_lif_rss_config(lif, lif->rss_types, key, indir);
return ionic_lif_rss_config(lif, lif->rss_types,
rxfh->key, rxfh->indir);
}
static int ionic_set_tunable(struct net_device *dev,

View File

@ -1370,28 +1370,29 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev)
return sizeof(edev->rss_key);
}
static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
static int qede_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct qede_dev *edev = netdev_priv(dev);
int i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!indir)
if (!rxfh->indir)
return 0;
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
indir[i] = edev->rss_ind_table[i];
rxfh->indir[i] = edev->rss_ind_table[i];
if (key)
memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
if (rxfh->key)
memcpy(rxfh->key, edev->rss_key, qede_get_rxfh_key_size(dev));
return 0;
}
static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int qede_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct qed_update_vport_params *vport_update_params;
struct qede_dev *edev = netdev_priv(dev);
@ -1403,20 +1404,21 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
return -EOPNOTSUPP;
}
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!indir && !key)
if (!rxfh->indir && !rxfh->key)
return 0;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
edev->rss_ind_table[i] = indir[i];
edev->rss_ind_table[i] = rxfh->indir[i];
edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
}
if (key) {
memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
if (rxfh->key) {
memcpy(&edev->rss_key, rxfh->key, qede_get_rxfh_key_size(dev));
edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
}

View File

@ -37,6 +37,7 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
.cap_rss_ctx_supported = true,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
@ -60,8 +61,6 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
.get_rxfh_context = efx_ethtool_get_rxfh_context,
.set_rxfh_context = efx_ethtool_set_rxfh_context,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,

View File

@ -240,6 +240,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@ -269,8 +270,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
.get_rxfh_context = efx_ethtool_get_rxfh_context,
.set_rxfh_context = efx_ethtool_set_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,

View File

@ -1163,48 +1163,8 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
return rc;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
memcpy(indir, efx->rss_context.rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
if (key)
memcpy(key, efx->rss_context.rx_hash_key,
efx->type->rx_hash_key_size);
return 0;
}
int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!indir && !key)
return 0;
if (!key)
key = efx->rss_context.rx_hash_key;
if (!indir)
indir = efx->rss_context.rx_indir_table;
return efx->type->rx_push_rss_config(efx, true, indir, key);
}
int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
static int efx_ethtool_get_rxfh_context(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
@ -1214,7 +1174,7 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
ctx = efx_find_rss_context_entry(efx, rss_context);
ctx = efx_find_rss_context_entry(efx, rxfh->rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@ -1223,37 +1183,60 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
if (rc)
goto out_unlock;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
if (key)
memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->indir)
memcpy(rxfh->indir, ctx->rx_indir_table,
sizeof(ctx->rx_indir_table));
if (rxfh->key)
memcpy(rxfh->key, ctx->rx_hash_key,
efx->type->rx_hash_key_size);
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
}
int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
const u32 *indir, const u8 *key,
const u8 hfunc, u32 *rss_context,
bool delete)
int efx_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
if (rxfh->rss_context)
return efx_ethtool_get_rxfh_context(net_dev, rxfh);
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
return rc;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->indir)
memcpy(rxfh->indir, efx->rss_context.rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
if (rxfh->key)
memcpy(rxfh->key, efx->rss_context.rx_hash_key,
efx->type->rx_hash_key_size);
return 0;
}
static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 *rss_context = &rxfh->rss_context;
struct efx_rss_context *ctx;
u32 *indir = rxfh->indir;
bool allocated = false;
u8 *key = rxfh->key;
int rc;
if (!efx->type->rx_push_rss_context_config)
return -EOPNOTSUPP;
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
if (delete) {
if (rxfh->rss_delete) {
/* alloc + delete == Nothing to do */
rc = -EINVAL;
goto out_unlock;
@ -1276,7 +1259,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
}
}
if (delete) {
if (rxfh->rss_delete) {
/* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
@ -1299,6 +1282,33 @@ out_unlock:
return rc;
}
int efx_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 *indir = rxfh->indir;
u8 *key = rxfh->key;
/* Hash function is Toeplitz, cannot be changed */
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (rxfh->rss_context)
return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack);
if (!indir && !key)
return 0;
if (!key)
key = efx->rss_context.rx_hash_key;
if (!indir)
indir = efx->rss_context.rx_indir_table;
return efx->type->rx_push_rss_config(efx, true, indir, key);
}
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);

View File

@ -44,16 +44,11 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev);
int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc);
int efx_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh);
int efx_ethtool_set_rxfh(struct net_device *net_dev,
const u32 *indir, const u8 *key, const u8 hfunc);
int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context);
int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
const u32 *indir, const u8 *key,
const u8 hfunc, u32 *rss_context,
bool delete);
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,

View File

@ -1257,31 +1257,33 @@ static u32 ef4_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
static int ef4_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
static int ef4_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct ef4_nic *efx = netdev_priv(net_dev);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->indir)
memcpy(rxfh->indir, efx->rx_indir_table,
sizeof(efx->rx_indir_table));
return 0;
}
static int ef4_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int ef4_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
/* We do not allow change in unsupported parameters */
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!indir)
if (!rxfh->indir)
return 0;
return efx->type->rx_push_rss_config(efx, true, indir);
return efx->type->rx_push_rss_config(efx, true, rxfh->indir);
}
static int ef4_ethtool_get_module_eeprom(struct net_device *net_dev,

View File

@ -240,6 +240,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_siena_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@ -269,8 +270,6 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
.get_rxfh_context = efx_siena_ethtool_get_rxfh_context,
.set_rxfh_context = efx_siena_ethtool_set_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,

View File

@ -1164,48 +1164,8 @@ u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
return rc;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
memcpy(indir, efx->rss_context.rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
if (key)
memcpy(key, efx->rss_context.rx_hash_key,
efx->type->rx_hash_key_size);
return 0;
}
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!indir && !key)
return 0;
if (!key)
key = efx->rss_context.rx_hash_key;
if (!indir)
indir = efx->rss_context.rx_indir_table;
return efx->type->rx_push_rss_config(efx, true, indir, key);
}
int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
static int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_rss_context *ctx;
@ -1215,7 +1175,7 @@ int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
ctx = efx_siena_find_rss_context_entry(efx, rss_context);
ctx = efx_siena_find_rss_context_entry(efx, rxfh->rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@ -1224,37 +1184,60 @@ int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
if (rc)
goto out_unlock;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
if (key)
memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->indir)
memcpy(rxfh->indir, ctx->rx_indir_table,
sizeof(ctx->rx_indir_table));
if (rxfh->key)
memcpy(rxfh->key, ctx->rx_hash_key,
efx->type->rx_hash_key_size);
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
}
int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
const u32 *indir, const u8 *key,
const u8 hfunc, u32 *rss_context,
bool delete)
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
if (rxfh->rss_context)
return efx_siena_ethtool_get_rxfh_context(net_dev, rxfh);
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
return rc;
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->indir)
memcpy(rxfh->indir, efx->rss_context.rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
if (rxfh->key)
memcpy(rxfh->key, efx->rss_context.rx_hash_key,
efx->type->rx_hash_key_size);
return 0;
}
static int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
u32 *rss_context = &rxfh->rss_context;
struct efx_rss_context *ctx;
u32 *indir = rxfh->indir;
bool allocated = false;
u8 *key = rxfh->key;
int rc;
if (!efx->type->rx_push_rss_context_config)
return -EOPNOTSUPP;
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
if (delete) {
if (rxfh->rss_delete) {
/* alloc + delete == Nothing to do */
rc = -EINVAL;
goto out_unlock;
@ -1277,7 +1260,7 @@ int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
}
}
if (delete) {
if (rxfh->rss_delete) {
/* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
@ -1300,6 +1283,33 @@ out_unlock:
return rc;
}
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
u32 *indir = rxfh->indir;
u8 *key = rxfh->key;
/* Hash function is Toeplitz, cannot be changed */
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (rxfh->rss_context)
return efx_siena_ethtool_set_rxfh_context(net_dev, rxfh, extack);
if (!indir && !key)
return 0;
if (!key)
key = efx->rss_context.rx_hash_key;
if (!indir)
indir = efx->rss_context.rx_indir_table;
return efx->type->rx_push_rss_config(efx, true, indir, key);
}
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = netdev_priv(net_dev);

View File

@ -41,16 +41,11 @@ int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev);
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc);
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh);
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
const u32 *indir, const u8 *key, const u8 hfunc);
int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context);
int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
const u32 *indir, const u8 *key,
const u8 hfunc, u32 *rss_context,
bool delete);
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,

View File

@ -1087,41 +1087,42 @@ static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
return ARRAY_SIZE(priv->rss.table);
}
static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
static int stmmac_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
indir[i] = priv->rss.table[i];
rxfh->indir[i] = priv->rss.table[i];
}
if (key)
memcpy(key, priv->rss.key, sizeof(priv->rss.key));
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (rxfh->key)
memcpy(rxfh->key, priv->rss.key, sizeof(priv->rss.key));
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int stmmac_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
priv->rss.table[i] = indir[i];
priv->rss.table[i] = rxfh->indir[i];
}
if (key)
memcpy(priv->rss.key, key, sizeof(priv->rss.key));
if (rxfh->key)
memcpy(priv->rss.key, rxfh->key, sizeof(priv->rss.key));
return stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);

View File

@ -1752,8 +1752,8 @@ static u32 netvsc_rss_indir_size(struct net_device *dev)
return ndc->rx_table_sz;
}
static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
static int netvsc_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
@ -1763,47 +1763,49 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
if (!ndev)
return -ENODEV;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
rndis_dev = ndev->extension;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
indir[i] = ndc->rx_table[i];
rxfh->indir[i] = ndc->rx_table[i];
}
if (key)
memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
if (rxfh->key)
memcpy(rxfh->key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
return 0;
}
static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int netvsc_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
struct rndis_device *rndis_dev;
u8 *key = rxfh->key;
int i;
if (!ndev)
return -ENODEV;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
rndis_dev = ndev->extension;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
if (indir[i] >= ndev->num_chn)
if (rxfh->indir[i] >= ndev->num_chn)
return -EINVAL;
for (i = 0; i < ndc->rx_table_sz; i++)
ndc->rx_table[i] = indir[i];
ndc->rx_table[i] = rxfh->indir[i];
}
if (!key) {
if (!indir)
if (!rxfh->indir)
return 0;
key = rndis_dev->rss_key;

View File

@ -3731,39 +3731,42 @@ static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
}
static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
static int virtnet_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
struct virtnet_info *vi = netdev_priv(dev);
int i;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
indir[i] = vi->ctrl->rss.indirection_table[i];
rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
}
if (key)
memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
if (rxfh->key)
memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
static int virtnet_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
int i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
vi->ctrl->rss.indirection_table[i] = indir[i];
vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
}
if (key)
memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
if (rxfh->key)
memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
virtnet_commit_rss_command(vi);

View File

@ -1136,27 +1136,26 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
}
static int
vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
vmxnet3_get_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
unsigned int n = rssConf->indTableSize;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!p)
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!rxfh->indir)
return 0;
if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
return 0;
while (n--)
p[n] = rssConf->indTable[n];
rxfh->indir[n] = rssConf->indTable[n];
return 0;
}
static int
vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
const u8 hfunc)
vmxnet3_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
unsigned int i;
unsigned long flags;
@ -1164,13 +1163,14 @@ vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
/* We do not allow change in unsupported parameters */
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!p)
if (!rxfh->indir)
return 0;
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = p[i];
rssConf->indTable[i] = rxfh->indir[i];
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,

View File

@ -118,6 +118,7 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
/* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
@ -911,6 +912,29 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
};
/* VIRTCHNL_OP_CONFIG_RSS_HFUNC
* VF sends this message to configure the RSS hash function. Only supported
* if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation.
* The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
* by the PF.
*/
struct virtchnl_rss_hfunc {
u16 vsi_id;
u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
u32 reserved;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
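
For illustration only, a VF driver could request symmetric hashing from the PF through this opcode. A minimal sketch, assuming a placeholder vf_send_msg() helper standing in for the driver's own virtchnl send routine (not part of this series)::

    /* Hypothetical sketch: ask the PF for symmetric Toeplitz hashing on a
     * given VSI. vf_send_msg() is a stand-in for the driver's virtchnl
     * transmit helper.
     */
    static int vf_request_symmetric_hash(u16 vsi_id)
    {
            struct virtchnl_rss_hfunc vrh = {
                    .vsi_id         = vsi_id,
                    .rss_algorithm  = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC,
            };

            return vf_send_msg(VIRTCHNL_OP_CONFIG_RSS_HFUNC,
                               (u8 *)&vrh, sizeof(vrh));
    }
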
/* VIRTCHNL_OP_ENABLE_CHANNELS
* VIRTCHNL_OP_DISABLE_CHANNELS
* VF sends these messages to enable or disable channels based on
@ -1095,14 +1119,6 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
@ -1542,6 +1558,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
vrl->lut_entries);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
valid_len = sizeof(struct virtchnl_rss_hfunc);
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:

View File

@ -596,10 +596,47 @@ struct ethtool_mm_stats {
u64 MACMergeHoldCount;
};
/**
* struct ethtool_rxfh_param - RXFH (RSS) parameters
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
* @indir_size: On SET, the array size of the user buffer for the
* indirection table, which may be zero, or
* %ETH_RXFH_INDIR_NO_CHANGE. On GET (read from the driver),
* the array size of the hardware indirection table.
* @indir: The indirection table of size @indir_size entries.
* @key_size: On SET, the array size of the user buffer for the hash key,
* which may be zero. On GET (read from the driver), the size of the
* hardware hash key.
* @key: The hash key of size @key_size bytes.
* @rss_context: RSS context identifier. Context 0 is the default for normal
* traffic; other contexts can be referenced as the destination for RX flow
* classification rules. On SET, %ETH_RXFH_CONTEXT_ALLOC is used
* to allocate a new RSS context; on return this field will
* contain the ID of the newly allocated context.
* @rss_delete: Set to non-ZERO to remove the @rss_context context.
* @input_xfrm: Defines how the input data is transformed. Valid values are one
* of %RXH_XFRM_*.
*/
struct ethtool_rxfh_param {
u8 hfunc;
u32 indir_size;
u32 *indir;
u32 key_size;
u8 *key;
u32 rss_context;
u8 rss_delete;
u8 input_xfrm;
};
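
A converted driver then reads and reports everything through this single parameter block. A minimal sketch of a ->set_rxfh() handler for a hypothetical device (names are placeholders) that only accepts plain or symmetric-xor Toeplitz::

    static int foo_set_rxfh(struct net_device *dev,
                            struct ethtool_rxfh_param *rxfh,
                            struct netlink_ext_ack *extack)
    {
            if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
                rxfh->hfunc != ETH_RSS_HASH_TOP)
                    return -EOPNOTSUPP;

            /* only the symmetric-xor input transform is understood here */
            if (rxfh->input_xfrm & ~RXH_XFRM_SYM_XOR) {
                    NL_SET_ERR_MSG(extack, "unsupported input transform");
                    return -EOPNOTSUPP;
            }

            /* program rxfh->indir, rxfh->key and the transform into HW ... */
            return 0;
    }
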
/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
* @cap_rss_ctx_supported: indicates if the driver supports RSS
* contexts.
* @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
* RSS.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @supported_ring_params: supported ring params.
* @get_drvinfo: Report driver/device information. Modern drivers no
@ -696,15 +733,6 @@ struct ethtool_mm_stats {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
* @get_rxfh_context: Get the contents of the RX flow hash indirection table,
 * hash key, and/or hash function associated to the given rss context.
* Returns a negative error code or zero.
* @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting
* the contents of the RX flow hash indirection table, hash key, and/or
* hash function associated to the given context. Arguments which are set
* to %NULL or zero will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@ -787,6 +815,8 @@ struct ethtool_mm_stats {
*/
struct ethtool_ops {
u32 cap_link_lanes_supported:1;
u32 cap_rss_ctx_supported:1;
u32 cap_rss_sym_xor_supported:1;
u32 supported_coalesce_params;
u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
@ -846,15 +876,9 @@ struct ethtool_ops {
int (*reset)(struct net_device *, u32 *);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
u8 *hfunc);
int (*set_rxfh)(struct net_device *, const u32 *indir,
const u8 *key, const u8 hfunc);
int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
u8 *hfunc, u32 rss_context);
int (*set_rxfh_context)(struct net_device *, const u32 *indir,
const u8 *key, const u8 hfunc,
u32 *rss_context, bool delete);
int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
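
Drivers opt in through the two new capability bits instead of extra callbacks. A hypothetical ops table (callback names are placeholders) advertising both::

    static const struct ethtool_ops foo_ethtool_ops = {
            .cap_rss_ctx_supported          = 1,    /* honours rxfh->rss_context */
            .cap_rss_sym_xor_supported      = 1,    /* accepts RXH_XFRM_SYM_XOR  */
            .get_rxfh_key_size              = foo_get_rxfh_key_size,
            .get_rxfh_indir_size            = foo_get_rxfh_indir_size,
            .get_rxfh                       = foo_get_rxfh,
            .set_rxfh                       = foo_set_rxfh,
    };
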

View File

@ -1266,6 +1266,8 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
* @input_xfrm: Defines how the input data is transformed. Valid values are one
* of %RXH_XFRM_*.
* @rsvd8: Reserved for future use; see the note on reserved space.
* @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
@ -1285,7 +1287,8 @@ struct ethtool_rxfh {
__u32 indir_size;
__u32 key_size;
__u8 hfunc;
__u8 rsvd8[3];
__u8 input_xfrm;
__u8 rsvd8[2];
__u32 rsvd32;
__u32 rss_config[];
};
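
From user space the new byte travels in the same ETHTOOL_SRSSH buffer. A hedged sketch (error handling omitted, fd is an already-open AF_INET socket, headers taken from a kernel that carries this patch) that enables the transform while leaving the indirection table and key untouched::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static int set_sym_xor(int fd, const char *ifname)
    {
            struct ethtool_rxfh rxfh = {
                    .cmd            = ETHTOOL_SRSSH,
                    .indir_size     = ETH_RXFH_INDIR_NO_CHANGE, /* keep table */
                    .key_size       = 0,                        /* keep key   */
                    .hfunc          = ETH_RSS_HASH_TOP,
                    .input_xfrm     = RXH_XFRM_SYM_XOR,
            };
            struct ifreq ifr = {};

            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&rxfh;
            return ioctl(fd, SIOCETHTOOL, &ifr);
    }
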
@ -1992,6 +1995,14 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WOL_MODE_COUNT 8
/* RSS hash function data
* XOR the corresponding source and destination fields of each specified
* protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
* calculation. Note that this XORing reduces the input set entropy and could
* be exploited to reduce the RSS queue spread.
*/
#define RXH_XFRM_SYM_XOR (1 << 0)
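
In effect the transform folds a flow and its reverse onto one hash input before the configured hfunc runs. A small host-side illustration of the idea (plain C, not the hardware implementation; __u32/__u16 come from <linux/types.h>)::

    struct tuple {
            __u32 saddr, daddr;
            __u16 sport, dport;
    };

    /* After the XOR, swapping source and destination in the input produces
     * an identical tuple, so both directions of a flow pick the same queue.
     */
    static void sym_xor(const struct tuple *in, struct tuple *out)
    {
            out->saddr = in->saddr ^ in->daddr;
            out->daddr = in->saddr ^ in->daddr;
            out->sport = in->sport ^ in->dport;
            out->dport = in->sport ^ in->dport;
    }
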
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */

View File

@ -908,6 +908,7 @@ enum {
ETHTOOL_A_RSS_HFUNC, /* u32 */
ETHTOOL_A_RSS_INDIR, /* binary */
ETHTOOL_A_RSS_HKEY, /* binary */
ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */
__ETHTOOL_A_RSS_CNT,
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),

View File

@ -589,8 +589,8 @@ err_free_info:
int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
{
struct ethtool_rxfh_param rxfh = {};
u32 dev_size, current_max = 0;
u32 *indir;
int ret;
if (!dev->ethtool_ops->get_rxfh_indir_size ||
@ -600,21 +600,21 @@ int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
if (dev_size == 0)
return -EOPNOTSUPP;
indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
if (!indir)
rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER);
if (!rxfh.indir)
return -ENOMEM;
ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
if (ret)
goto out;
while (dev_size--)
current_max = max(current_max, indir[dev_size]);
current_max = max(current_max, rxfh.indir[dev_size]);
*max = current_max;
out:
kfree(indir);
kfree(rxfh.indir);
return ret;
}

View File

@ -972,18 +972,35 @@ static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
u32 cmd, void __user *useraddr)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxfh_param rxfh = {};
struct ethtool_rxnfc info;
size_t info_size = sizeof(info);
int rc;
if (!dev->ethtool_ops->set_rxnfc)
if (!ops->set_rxnfc || !ops->get_rxfh)
return -EOPNOTSUPP;
rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
if (rc)
return rc;
rc = dev->ethtool_ops->set_rxnfc(dev, &info);
rc = ops->get_rxfh(dev, &rxfh);
if (rc)
return rc;
/* Sanity check: if symmetric-xor is set, then:
* 1 - no other fields besides IP src/dst and/or L4 src/dst
* 2 - If src is set, dst must also be set
*/
if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
((info.data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) ||
(!!(info.data & RXH_IP_SRC) ^ !!(info.data & RXH_IP_DST)) ||
(!!(info.data & RXH_L4_B_0_1) ^ !!(info.data & RXH_L4_B_2_3))))
return -EINVAL;
rc = ops->set_rxnfc(dev, &info);
if (rc)
return rc;
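
With the check above, once symmetric-xor is active only balanced IP/L4 field sets can be requested through ETHTOOL_SRXFH. For example (flag values from the uAPI header)::

    /* accepted: both halves of each selected pair are present */
    __u64 ok   = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
    /* rejected: L4 source port requested without the destination port */
    __u64 bad1 = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1;
    /* rejected: a field outside the IP/L4 tuple, e.g. the VLAN tag */
    __u64 bad2 = RXH_IP_SRC | RXH_IP_DST | RXH_VLAN;
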
@ -1061,15 +1078,15 @@ EXPORT_SYMBOL(netdev_rss_key_fill);
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
u32 user_size, dev_size;
u32 *indir;
struct ethtool_rxfh_param rxfh = {};
u32 user_size;
int ret;
if (!dev->ethtool_ops->get_rxfh_indir_size ||
!dev->ethtool_ops->get_rxfh)
return -EOPNOTSUPP;
dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
if (dev_size == 0)
rxfh.indir_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
if (rxfh.indir_size == 0)
return -EOPNOTSUPP;
if (copy_from_user(&user_size,
@ -1078,41 +1095,41 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
return -EFAULT;
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
&dev_size, sizeof(dev_size)))
&rxfh.indir_size, sizeof(rxfh.indir_size)))
return -EFAULT;
/* If the user buffer size is 0, this is just a query for the
* device table size. Otherwise, if it's smaller than the
* device table size it's an error.
*/
if (user_size < dev_size)
if (user_size < rxfh.indir_size)
return user_size == 0 ? 0 : -EINVAL;
indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
if (!indir)
rxfh.indir = kcalloc(rxfh.indir_size, sizeof(rxfh.indir[0]), GFP_USER);
if (!rxfh.indir)
return -ENOMEM;
ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
if (ret)
goto out;
if (copy_to_user(useraddr +
offsetof(struct ethtool_rxfh_indir, ring_index[0]),
indir, dev_size * sizeof(indir[0])))
rxfh.indir, rxfh.indir_size * sizeof(*rxfh.indir)))
ret = -EFAULT;
out:
kfree(indir);
kfree(rxfh.indir);
return ret;
}
static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
struct ethtool_rxnfc rx_rings;
u32 user_size, dev_size, i;
u32 *indir;
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxfh_param rxfh_dev = {};
struct netlink_ext_ack *extack = NULL;
struct ethtool_rxnfc rx_rings;
u32 user_size, i;
int ret;
u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
@ -1120,8 +1137,8 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
!ops->get_rxnfc)
return -EOPNOTSUPP;
dev_size = ops->get_rxfh_indir_size(dev);
if (dev_size == 0)
rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev);
if (rxfh_dev.indir_size == 0)
return -EOPNOTSUPP;
if (copy_from_user(&user_size,
@ -1129,11 +1146,12 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
sizeof(user_size)))
return -EFAULT;
if (user_size != 0 && user_size != dev_size)
if (user_size != 0 && user_size != rxfh_dev.indir_size)
return -EINVAL;
indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
if (!indir)
rxfh_dev.indir = kcalloc(rxfh_dev.indir_size,
sizeof(rxfh_dev.indir[0]), GFP_USER);
if (!rxfh_dev.indir)
return -ENOMEM;
rx_rings.cmd = ETHTOOL_GRXRINGS;
@ -1142,18 +1160,21 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
goto out;
if (user_size == 0) {
for (i = 0; i < dev_size; i++)
u32 *indir = rxfh_dev.indir;
for (i = 0; i < rxfh_dev.indir_size; i++)
indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
} else {
ret = ethtool_copy_validate_indir(indir,
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
useraddr + ringidx_offset,
&rx_rings,
dev_size);
rxfh_dev.indir_size);
if (ret)
goto out;
}
ret = ops->set_rxfh(dev, indir, NULL, ETH_RSS_HASH_NO_CHANGE);
rxfh_dev.hfunc = ETH_RSS_HASH_NO_CHANGE;
ret = ops->set_rxfh(dev, &rxfh_dev, extack);
if (ret)
goto out;
@ -1164,32 +1185,29 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
dev->priv_flags |= IFF_RXFH_CONFIGURED;
out:
kfree(indir);
kfree(rxfh_dev.indir);
return ret;
}
static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
void __user *useraddr)
{
int ret;
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxfh_param rxfh_dev = {};
u32 user_indir_size, user_key_size;
u32 dev_indir_size = 0, dev_key_size = 0;
struct ethtool_rxfh rxfh;
u32 total_size;
u32 indir_bytes;
u32 *indir = NULL;
u8 dev_hfunc = 0;
u8 *hkey = NULL;
u8 *rss_config;
u32 total_size;
int ret;
if (!ops->get_rxfh)
return -EOPNOTSUPP;
if (ops->get_rxfh_indir_size)
dev_indir_size = ops->get_rxfh_indir_size(dev);
rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev);
if (ops->get_rxfh_key_size)
dev_key_size = ops->get_rxfh_key_size(dev);
rxfh_dev.key_size = ops->get_rxfh_key_size(dev);
if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
return -EFAULT;
@ -1197,44 +1215,41 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
user_key_size = rxfh.key_size;
/* Check that reserved fields are 0 for now */
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32)
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
if (rxfh.rss_context && !ops->get_rxfh_context)
if (rxfh.rss_context && !ops->cap_rss_ctx_supported)
return -EOPNOTSUPP;
rxfh.indir_size = dev_indir_size;
rxfh.key_size = dev_key_size;
rxfh.indir_size = rxfh_dev.indir_size;
rxfh.key_size = rxfh_dev.key_size;
if (copy_to_user(useraddr, &rxfh, sizeof(rxfh)))
return -EFAULT;
if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
(user_key_size && (user_key_size != dev_key_size)))
if ((user_indir_size && user_indir_size != rxfh_dev.indir_size) ||
(user_key_size && user_key_size != rxfh_dev.key_size))
return -EINVAL;
indir_bytes = user_indir_size * sizeof(indir[0]);
indir_bytes = user_indir_size * sizeof(rxfh_dev.indir[0]);
total_size = indir_bytes + user_key_size;
rss_config = kzalloc(total_size, GFP_USER);
if (!rss_config)
return -ENOMEM;
if (user_indir_size)
indir = (u32 *)rss_config;
rxfh_dev.indir = (u32 *)rss_config;
if (user_key_size)
hkey = rss_config + indir_bytes;
rxfh_dev.key = rss_config + indir_bytes;
if (rxfh.rss_context)
ret = dev->ethtool_ops->get_rxfh_context(dev, indir, hkey,
&dev_hfunc,
rxfh.rss_context);
else
ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey, &dev_hfunc);
rxfh_dev.rss_context = rxfh.rss_context;
ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev);
if (ret)
goto out;
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc),
&dev_hfunc, sizeof(rxfh.hfunc))) {
&rxfh_dev.hfunc, sizeof(rxfh.hfunc))) {
ret = -EFAULT;
} else if (copy_to_user(useraddr +
offsetof(struct ethtool_rxfh, rss_config[0]),
@ -1250,16 +1265,16 @@ out:
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
void __user *useraddr)
{
int ret;
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
const struct ethtool_ops *ops = dev->ethtool_ops;
u32 dev_indir_size = 0, dev_key_size = 0, i;
struct ethtool_rxfh_param rxfh_dev = {};
struct netlink_ext_ack *extack = NULL;
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
u32 dev_indir_size = 0, dev_key_size = 0, i;
u32 *indir = NULL, indir_bytes = 0;
u8 *hkey = NULL;
u32 indir_bytes = 0;
u8 *rss_config;
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
bool delete = false;
int ret;
if (!ops->get_rxnfc || !ops->set_rxfh)
return -EOPNOTSUPP;
@ -1273,10 +1288,14 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
return -EFAULT;
/* Check that reserved fields are 0 for now */
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32)
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
if (rxfh.rss_context && !ops->set_rxfh_context)
if (rxfh.rss_context && !ops->cap_rss_ctx_supported)
return -EOPNOTSUPP;
/* Check input data transformation capabilities */
if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
!ops->cap_rss_sym_xor_supported)
return -EOPNOTSUPP;
/* If either indir, hash key or function is valid, proceed further.
@ -1291,7 +1310,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
return -EINVAL;
if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
indir_bytes = dev_indir_size * sizeof(indir[0]);
indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
if (!rss_config)
@ -1308,8 +1327,9 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
*/
if (rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
indir = (u32 *)rss_config;
ret = ethtool_copy_validate_indir(indir,
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
useraddr + rss_cfg_offset,
&rx_rings,
rxfh.indir_size);
@ -1317,17 +1337,22 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
goto out;
} else if (rxfh.indir_size == 0) {
if (rxfh.rss_context == 0) {
indir = (u32 *)rss_config;
u32 *indir;
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
indir = rxfh_dev.indir;
for (i = 0; i < dev_indir_size; i++)
indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
} else {
delete = true;
rxfh_dev.rss_delete = true;
}
}
if (rxfh.key_size) {
hkey = rss_config + indir_bytes;
if (copy_from_user(hkey,
rxfh_dev.key_size = dev_key_size;
rxfh_dev.key = rss_config + indir_bytes;
if (copy_from_user(rxfh_dev.key,
useraddr + rss_cfg_offset + indir_bytes,
rxfh.key_size)) {
ret = -EFAULT;
@ -1335,19 +1360,19 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
}
}
if (rxfh.rss_context)
ret = ops->set_rxfh_context(dev, indir, hkey, rxfh.hfunc,
&rxfh.rss_context, delete);
else
ret = ops->set_rxfh(dev, indir, hkey, rxfh.hfunc);
rxfh_dev.hfunc = rxfh.hfunc;
rxfh_dev.rss_context = rxfh.rss_context;
rxfh_dev.input_xfrm = rxfh.input_xfrm;
ret = ops->set_rxfh(dev, &rxfh_dev, extack);
if (ret)
goto out;
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
&rxfh.rss_context, sizeof(rxfh.rss_context)))
&rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context)))
ret = -EFAULT;
if (!rxfh.rss_context) {
if (!rxfh_dev.rss_context) {
/* indicate whether rxfh was set to default */
if (rxfh.indir_size == 0)
dev->priv_flags &= ~IFF_RXFH_CONFIGURED;

View File

@ -13,6 +13,7 @@ struct rss_reply_data {
u32 indir_size;
u32 hkey_size;
u32 hfunc;
u32 input_xfrm;
u32 *indir_table;
u8 *hkey;
};
@ -48,9 +49,9 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
struct rss_reply_data *data = RSS_REPDATA(reply_base);
struct rss_req_info *request = RSS_REQINFO(req_base);
struct net_device *dev = reply_base->dev;
struct ethtool_rxfh_param rxfh = {};
const struct ethtool_ops *ops;
u32 total_size, indir_bytes;
u8 dev_hfunc = 0;
u8 *rss_config;
int ret;
@ -59,7 +60,7 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
return -EOPNOTSUPP;
/* Some drivers don't handle rss_context */
if (request->rss_context && !ops->get_rxfh_context)
if (request->rss_context && !ops->cap_rss_ctx_supported)
return -EOPNOTSUPP;
ret = ethnl_ops_begin(dev);
@ -83,21 +84,21 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
if (data->indir_size)
data->indir_table = (u32 *)rss_config;
if (data->hkey_size)
data->hkey = rss_config + indir_bytes;
if (request->rss_context)
ret = ops->get_rxfh_context(dev, data->indir_table, data->hkey,
&dev_hfunc, request->rss_context);
else
ret = ops->get_rxfh(dev, data->indir_table, data->hkey,
&dev_hfunc);
rxfh.indir_size = data->indir_size;
rxfh.indir = data->indir_table;
rxfh.key_size = data->hkey_size;
rxfh.key = data->hkey;
rxfh.rss_context = request->rss_context;
ret = ops->get_rxfh(dev, &rxfh);
if (ret)
goto out_ops;
data->hfunc = dev_hfunc;
data->hfunc = rxfh.hfunc;
data->input_xfrm = rxfh.input_xfrm;
out_ops:
ethnl_ops_complete(dev);
return ret;
@ -111,6 +112,7 @@ rss_reply_size(const struct ethnl_req_info *req_base,
int len;
len = nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */
nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */
nla_total_size(data->hkey_size); /* _RSS_HKEY */
@ -125,6 +127,8 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
if ((data->hfunc &&
nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
(data->input_xfrm &&
nla_put_u32(skb, ETHTOOL_A_RSS_INPUT_XFRM, data->input_xfrm)) ||
(data->indir_size &&
nla_put(skb, ETHTOOL_A_RSS_INDIR,
sizeof(u32) * data->indir_size, data->indir_table)) ||