Merge branch '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
idpf: XDP chapter I: convert Rx to libeth

Alexander Lobakin says:

XDP for idpf is currently 5 chapters:
* convert Rx to libeth (this);
* convert Tx and stats to libeth;
* generic XDP and XSk code changes, libeth_xdp;
* actual XDP for idpf via libeth_xdp;
* XSk for idpf (^).

Part I does the following:
* splits &idpf_queue into 4 (RQ, SQ, FQ, CQ) and puts them on a diet;
* ensures optimal cacheline placement, strictly asserts CL sizes;
* moves currently unused/dead singleq mode out of line;
* reuses libeth's Rx ptype definitions and helpers;
* uses libeth's Rx buffer management for both header and payload;
* eliminates memcpy()s and coherent DMA uses on hotpath, uses
  napi_build_skb() instead of in-place short skb allocation.

Most idpf patches, except for the queue split, remove more lines than
they add.

Expect far better memory utilization and +5-8% on Rx depending on
the case (+17% on skb XDP_DROP :>).

* '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  idpf: use libeth Rx buffer management for payload buffer
  idpf: convert header split mode to libeth + napi_build_skb()
  libeth: support different types of buffers for Rx
  idpf: remove legacy Page Pool Ethtool stats
  idpf: reuse libeth's definitions of parsed ptype structures
  idpf: compile singleq code only under default-n CONFIG_IDPF_SINGLEQ
  idpf: merge singleq and splitq &net_device_ops
  idpf: strictly assert cachelines of queue and queue vector structures
  idpf: avoid bloating &idpf_q_vector with big %NR_CPUS
  idpf: split &idpf_queue into 4 strictly-typed queue structures
  idpf: stop using macros for accessing queue descriptors
  libeth: add cacheline / struct layout assertion helpers
  page_pool: use __cacheline_group_{begin, end}_aligned()
  cache: add __cacheline_group_{begin, end}_aligned() (+ couple more)
====================

Link: https://patch.msgid.link/20240710203031.188081-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
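
For readers who haven't followed the libeth work: the "uses libeth's Rx
buffer management" item above means idpf now fills its Rx rings through
libeth's page_pool-backed allocator instead of open-coding page
allocation, DMA mapping and recycling. Below is a condensed,
illustrative sketch of that refill pattern, modelled on
idpf_rx_singleq_buf_hw_alloc_all() in the diff further down; the helper
name is invented for the example, the struct and field names follow the
patches, and tail-bump/error handling is trimmed.

#include <net/libeth/rx.h>

/* Hypothetical, simplified refill helper -- not the driver's actual code. */
static u32 example_rx_refill(struct idpf_rx_queue *rx_q, u32 count)
{
	/* Fill-queue view libeth needs: the page_pool, the buffer array it
	 * manages, the per-buffer truesize and the ring size.
	 */
	const struct libeth_fq_fp fq = {
		.pp		= rx_q->pp,
		.fqes		= rx_q->rx_buf,
		.truesize	= rx_q->truesize,
		.count		= rx_q->desc_count,
	};
	u32 nta = rx_q->next_to_alloc;
	u32 done = 0;

	while (count--) {
		dma_addr_t addr;

		/* libeth picks (or recycles) a page from the pool, maps it
		 * and returns the DMA address to program into the buffer
		 * descriptor.
		 */
		addr = libeth_rx_alloc(&fq, nta);
		if (addr == DMA_MAPPING_ERROR)
			break;

		rx_q->single_buf[nta].pkt_addr = cpu_to_le64(addr);
		rx_q->single_buf[nta].hdr_addr = 0;

		if (unlikely(++nta == rx_q->desc_count))
			nta = 0;
		done++;
	}

	rx_q->next_to_alloc = nta;

	return done;
}

On the completion side, libeth_rx_sync_for_cpu() takes over the DMA sync
and page-reuse bookkeeping, which is what lets the hot path drop its
memcpy()s and feed header-split buffers straight to napi_build_skb().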

@@ -384,17 +384,6 @@ config IGC_LEDS
 	  Optional support for controlling the NIC LED's with the netdev
 	  LED trigger.
 
-config IDPF
-	tristate "Intel(R) Infrastructure Data Path Function Support"
-	depends on PCI_MSI
-	select DIMLIB
-	select PAGE_POOL
-	select PAGE_POOL_STATS
-	help
-	  This driver supports Intel(R) Infrastructure Data Path Function
-	  devices.
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called idpf.
+source "drivers/net/ethernet/intel/idpf/Kconfig"
 
 endif # NET_VENDOR_INTEL


@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config IDPF
+	tristate "Intel(R) Infrastructure Data Path Function Support"
+	depends on PCI_MSI
+	select DIMLIB
+	select LIBETH
+	help
+	  This driver supports Intel(R) Infrastructure Data Path Function
+	  devices.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called idpf.
+
+if IDPF
+
+config IDPF_SINGLEQ
+	bool "idpf singleq support"
+	help
+	  This option enables support for legacy single Rx/Tx queues w/no
+	  completion and fill queues. Only enable if you have hardware which
+	  wants to work in this mode as it increases the driver size and adds
+	  runtme checks on hotpath.
+
+endif # IDPF


@@ -12,7 +12,8 @@ idpf-y := \
 	idpf_ethtool.o \
 	idpf_lib.o \
 	idpf_main.o \
-	idpf_singleq_txrx.o \
 	idpf_txrx.o \
 	idpf_virtchnl.o \
 	idpf_vf_dev.o
+
+idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o


@@ -17,10 +17,8 @@ struct idpf_vport_max_q;
 #include <linux/sctp.h>
 #include <linux/ethtool_netlink.h>
 #include <net/gro.h>
-#include <linux/dim.h>
 
 #include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
 #include "idpf_txrx.h"
 #include "idpf_controlq.h"
 
@@ -266,7 +264,6 @@ struct idpf_port_stats {
  *		      the worst case.
  * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
  * @bufq_desc_count: Buffer queue descriptor count
- * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
  * @num_rxq_grp: Number of RX queues in a group
  * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
  *	      group will yield total number of RX queues.
@@ -302,7 +299,7 @@ struct idpf_vport {
 	u16 num_txq_grp;
 	struct idpf_txq_group *txq_grps;
 	u32 txq_model;
-	struct idpf_queue **txqs;
+	struct idpf_tx_queue **txqs;
 	bool crc_enable;
 
 	u16 num_rxq;
@@ -310,11 +307,10 @@ struct idpf_vport {
 	u32 rxq_desc_count;
 	u8 num_bufqs_per_qgrp;
 	u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
-	u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
 	u16 num_rxq_grp;
 	struct idpf_rxq_group *rxq_grps;
 	u32 rxq_model;
-	struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];
+	struct libeth_rx_pt *rx_ptype_lkup;
 
 	struct idpf_adapter *adapter;
 	struct net_device *netdev;
@@ -601,7 +597,8 @@ struct idpf_adapter {
  */
 static inline int idpf_is_queue_model_split(u16 q_model)
 {
-	return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+	return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
+	       q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
 }
 
 #define idpf_is_cap_ena(adapter, field, flag) \


@ -437,22 +437,24 @@ struct idpf_stats {
.stat_offset = offsetof(_type, _stat) \ .stat_offset = offsetof(_type, _stat) \
} }
/* Helper macro for defining some statistics related to queues */ /* Helper macros for defining some statistics related to queues */
#define IDPF_QUEUE_STAT(_name, _stat) \ #define IDPF_RX_QUEUE_STAT(_name, _stat) \
IDPF_STAT(struct idpf_queue, _name, _stat) IDPF_STAT(struct idpf_rx_queue, _name, _stat)
#define IDPF_TX_QUEUE_STAT(_name, _stat) \
IDPF_STAT(struct idpf_tx_queue, _name, _stat)
/* Stats associated with a Tx queue */ /* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
IDPF_QUEUE_STAT("pkts", q_stats.tx.packets), IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes), IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts), IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
}; };
/* Stats associated with an Rx queue */ /* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
IDPF_QUEUE_STAT("pkts", q_stats.rx.packets), IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes), IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts), IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
}; };
#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) #define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
@ -563,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < vport_config->max_q.max_rxq; i++) for (i = 0; i < vport_config->max_q.max_rxq; i++)
idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
"rx", i); "rx", i);
page_pool_ethtool_stats_get_strings(data);
} }
/** /**
@ -598,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config; struct idpf_vport_config *vport_config;
u16 max_txq, max_rxq; u16 max_txq, max_rxq;
unsigned int size;
if (sset != ETH_SS_STATS) if (sset != ETH_SS_STATS)
return -EINVAL; return -EINVAL;
@ -617,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
max_txq = vport_config->max_q.max_txq; max_txq = vport_config->max_q.max_txq;
max_rxq = vport_config->max_q.max_rxq; max_rxq = vport_config->max_q.max_rxq;
size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
(IDPF_RX_QUEUE_STATS_LEN * max_rxq); (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
size += page_pool_ethtool_stats_get_count();
return size;
} }
/** /**
@ -633,7 +629,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
* Copies the stat data defined by the pointer and stat structure pair into * Copies the stat data defined by the pointer and stat structure pair into
* the memory supplied as data. If the pointer is null, data will be zero'd. * the memory supplied as data. If the pointer is null, data will be zero'd.
*/ */
static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
const struct idpf_stats *stat) const struct idpf_stats *stat)
{ {
char *p; char *p;
@ -671,6 +667,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
* idpf_add_queue_stats - copy queue statistics into supplied buffer * idpf_add_queue_stats - copy queue statistics into supplied buffer
* @data: ethtool stats buffer * @data: ethtool stats buffer
* @q: the queue to copy * @q: the queue to copy
* @type: type of the queue
* *
* Queue statistics must be copied while protected by u64_stats_fetch_begin, * Queue statistics must be copied while protected by u64_stats_fetch_begin,
* so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
@ -681,19 +678,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
* *
* This function expects to be called while under rcu_read_lock(). * This function expects to be called while under rcu_read_lock().
*/ */
static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) static void idpf_add_queue_stats(u64 **data, const void *q,
enum virtchnl2_queue_type type)
{ {
const struct u64_stats_sync *stats_sync;
const struct idpf_stats *stats; const struct idpf_stats *stats;
unsigned int start; unsigned int start;
unsigned int size; unsigned int size;
unsigned int i; unsigned int i;
if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
size = IDPF_RX_QUEUE_STATS_LEN; size = IDPF_RX_QUEUE_STATS_LEN;
stats = idpf_gstrings_rx_queue_stats; stats = idpf_gstrings_rx_queue_stats;
stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
} else { } else {
size = IDPF_TX_QUEUE_STATS_LEN; size = IDPF_TX_QUEUE_STATS_LEN;
stats = idpf_gstrings_tx_queue_stats; stats = idpf_gstrings_tx_queue_stats;
stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
} }
/* To avoid invalid statistics values, ensure that we keep retrying /* To avoid invalid statistics values, ensure that we keep retrying
@ -701,10 +702,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
* u64_stats_fetch_retry. * u64_stats_fetch_retry.
*/ */
do { do {
start = u64_stats_fetch_begin(&q->stats_sync); start = u64_stats_fetch_begin(stats_sync);
for (i = 0; i < size; i++) for (i = 0; i < size; i++)
idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
} while (u64_stats_fetch_retry(&q->stats_sync, start)); } while (u64_stats_fetch_retry(stats_sync, start));
/* Once we successfully copy the stats in, update the data pointer */ /* Once we successfully copy the stats in, update the data pointer */
*data += size; *data += size;
@ -793,7 +794,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < num_rxq; j++) { for (j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats; struct idpf_rx_queue_stats *stats;
struct idpf_queue *rxq; struct idpf_rx_queue *rxq;
unsigned int start; unsigned int start;
if (idpf_is_queue_model_split(vport->rxq_model)) if (idpf_is_queue_model_split(vport->rxq_model))
@ -807,7 +808,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do { do {
start = u64_stats_fetch_begin(&rxq->stats_sync); start = u64_stats_fetch_begin(&rxq->stats_sync);
stats = &rxq->q_stats.rx; stats = &rxq->q_stats;
hw_csum_err = u64_stats_read(&stats->hw_csum_err); hw_csum_err = u64_stats_read(&stats->hw_csum_err);
hsplit = u64_stats_read(&stats->hsplit_pkts); hsplit = u64_stats_read(&stats->hsplit_pkts);
hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
@ -828,7 +829,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < txq_grp->num_txq; j++) { for (j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs; u64 linearize, qbusy, skb_drops, dma_map_errs;
struct idpf_queue *txq = txq_grp->txqs[j]; struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats; struct idpf_tx_queue_stats *stats;
unsigned int start; unsigned int start;
@ -838,7 +839,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do { do {
start = u64_stats_fetch_begin(&txq->stats_sync); start = u64_stats_fetch_begin(&txq->stats_sync);
stats = &txq->q_stats.tx; stats = &txq->q_stats;
linearize = u64_stats_read(&stats->linearize); linearize = u64_stats_read(&stats->linearize);
qbusy = u64_stats_read(&stats->q_busy); qbusy = u64_stats_read(&stats->q_busy);
skb_drops = u64_stats_read(&stats->skb_drops); skb_drops = u64_stats_read(&stats->skb_drops);
@ -869,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{ {
struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config; struct idpf_vport_config *vport_config;
struct page_pool_stats pp_stats = { };
struct idpf_vport *vport; struct idpf_vport *vport;
unsigned int total = 0; unsigned int total = 0;
unsigned int i, j; unsigned int i, j;
@ -896,12 +896,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
qtype = VIRTCHNL2_QUEUE_TYPE_TX; qtype = VIRTCHNL2_QUEUE_TYPE_TX;
for (j = 0; j < txq_grp->num_txq; j++, total++) { for (j = 0; j < txq_grp->num_txq; j++, total++) {
struct idpf_queue *txq = txq_grp->txqs[j]; struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq) if (!txq)
idpf_add_empty_queue_stats(&data, qtype); idpf_add_empty_queue_stats(&data, qtype);
else else
idpf_add_queue_stats(&data, txq); idpf_add_queue_stats(&data, txq, qtype);
} }
} }
@ -929,7 +929,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
num_rxq = rxq_grp->singleq.num_rxq; num_rxq = rxq_grp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, total++) { for (j = 0; j < num_rxq; j++, total++) {
struct idpf_queue *rxq; struct idpf_rx_queue *rxq;
if (is_splitq) if (is_splitq)
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
@ -938,93 +938,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
if (!rxq) if (!rxq)
idpf_add_empty_queue_stats(&data, qtype); idpf_add_empty_queue_stats(&data, qtype);
else else
idpf_add_queue_stats(&data, rxq); idpf_add_queue_stats(&data, rxq, qtype);
/* In splitq mode, don't get page pool stats here since
* the pools are attached to the buffer queues
*/
if (is_splitq)
continue;
if (rxq)
page_pool_get_stats(rxq->pp, &pp_stats);
}
}
for (i = 0; i < vport->num_rxq_grp; i++) {
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
struct idpf_queue *rxbufq =
&vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
page_pool_get_stats(rxbufq->pp, &pp_stats);
} }
} }
for (; total < vport_config->max_q.max_rxq; total++) for (; total < vport_config->max_q.max_rxq; total++)
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
page_pool_ethtool_stats_get(data, &pp_stats);
rcu_read_unlock(); rcu_read_unlock();
idpf_vport_ctrl_unlock(netdev); idpf_vport_ctrl_unlock(netdev);
} }
/** /**
* idpf_find_rxq - find rxq from q index * idpf_find_rxq_vec - find rxq vector from q index
* @vport: virtual port associated to queue * @vport: virtual port associated to queue
* @q_num: q index used to find queue * @q_num: q index used to find queue
* *
* returns pointer to rx queue * returns pointer to rx vector
*/ */
static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num) static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
int q_num)
{ {
int q_grp, q_idx; int q_grp, q_idx;
if (!idpf_is_queue_model_split(vport->rxq_model)) if (!idpf_is_queue_model_split(vport->rxq_model))
return vport->rxq_grps->singleq.rxqs[q_num]; return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq; return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
} }
/** /**
* idpf_find_txq - find txq from q index * idpf_find_txq_vec - find txq vector from q index
* @vport: virtual port associated to queue * @vport: virtual port associated to queue
* @q_num: q index used to find queue * @q_num: q index used to find queue
* *
* returns pointer to tx queue * returns pointer to tx vector
*/ */
static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num) static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
int q_num)
{ {
int q_grp; int q_grp;
if (!idpf_is_queue_model_split(vport->txq_model)) if (!idpf_is_queue_model_split(vport->txq_model))
return vport->txqs[q_num]; return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
return vport->txq_grps[q_grp].complq; return vport->txq_grps[q_grp].complq->q_vector;
} }
/** /**
* __idpf_get_q_coalesce - get ITR values for specific queue * __idpf_get_q_coalesce - get ITR values for specific queue
* @ec: ethtool structure to fill with driver's coalesce settings * @ec: ethtool structure to fill with driver's coalesce settings
* @q: quuee of Rx or Tx * @q_vector: queue vector corresponding to this queue
* @type: queue type
*/ */
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
struct idpf_queue *q) const struct idpf_q_vector *q_vector,
enum virtchnl2_queue_type type)
{ {
if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
ec->use_adaptive_rx_coalesce = ec->use_adaptive_rx_coalesce =
IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode); IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
ec->rx_coalesce_usecs = q->q_vector->rx_itr_value; ec->rx_coalesce_usecs = q_vector->rx_itr_value;
} else { } else {
ec->use_adaptive_tx_coalesce = ec->use_adaptive_tx_coalesce =
IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode); IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
ec->tx_coalesce_usecs = q->q_vector->tx_itr_value; ec->tx_coalesce_usecs = q_vector->tx_itr_value;
} }
} }
@ -1040,8 +1024,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, struct ethtool_coalesce *ec,
u32 q_num) u32 q_num)
{ {
struct idpf_netdev_priv *np = netdev_priv(netdev); const struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport *vport; const struct idpf_vport *vport;
int err = 0; int err = 0;
idpf_vport_ctrl_lock(netdev); idpf_vport_ctrl_lock(netdev);
@ -1056,10 +1040,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
} }
if (q_num < vport->num_rxq) if (q_num < vport->num_rxq)
__idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num)); __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_RX);
if (q_num < vport->num_txq) if (q_num < vport->num_txq)
__idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num)); __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_TX);
unlock_mutex: unlock_mutex:
idpf_vport_ctrl_unlock(netdev); idpf_vport_ctrl_unlock(netdev);
@ -1103,16 +1089,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/** /**
* __idpf_set_q_coalesce - set ITR values for specific queue * __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings * @ec: ethtool structure from user to update ITR settings
* @q: queue for which itr values has to be set * @qv: queue vector for which itr values has to be set
* @is_rxq: is queue type rx * @is_rxq: is queue type rx
* *
* Returns 0 on success, negative otherwise. * Returns 0 on success, negative otherwise.
*/ */
static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
struct idpf_queue *q, bool is_rxq) struct idpf_q_vector *qv, bool is_rxq)
{ {
u32 use_adaptive_coalesce, coalesce_usecs; u32 use_adaptive_coalesce, coalesce_usecs;
struct idpf_q_vector *qv = q->q_vector;
bool is_dim_ena = false; bool is_dim_ena = false;
u16 itr_val; u16 itr_val;
@ -1128,7 +1113,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
itr_val = qv->tx_itr_value; itr_val = qv->tx_itr_value;
} }
if (coalesce_usecs != itr_val && use_adaptive_coalesce) { if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
return -EINVAL; return -EINVAL;
} }
@ -1137,7 +1122,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
return 0; return 0;
if (coalesce_usecs > IDPF_ITR_MAX) { if (coalesce_usecs > IDPF_ITR_MAX) {
netdev_err(q->vport->netdev, netdev_err(qv->vport->netdev,
"Invalid value, %d-usecs range is 0-%d\n", "Invalid value, %d-usecs range is 0-%d\n",
coalesce_usecs, IDPF_ITR_MAX); coalesce_usecs, IDPF_ITR_MAX);
@ -1146,7 +1131,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
if (coalesce_usecs % 2) { if (coalesce_usecs % 2) {
coalesce_usecs--; coalesce_usecs--;
netdev_info(q->vport->netdev, netdev_info(qv->vport->netdev,
"HW only supports even ITR values, ITR rounded to %d\n", "HW only supports even ITR values, ITR rounded to %d\n",
coalesce_usecs); coalesce_usecs);
} }
@ -1185,15 +1170,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
* *
* Return 0 on success, and negative on failure * Return 0 on success, and negative on failure
*/ */
static int idpf_set_q_coalesce(struct idpf_vport *vport, static int idpf_set_q_coalesce(const struct idpf_vport *vport,
struct ethtool_coalesce *ec, const struct ethtool_coalesce *ec,
int q_num, bool is_rxq) int q_num, bool is_rxq)
{ {
struct idpf_queue *q; struct idpf_q_vector *qv;
q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num); qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
idpf_find_txq_vec(vport, q_num);
if (q && __idpf_set_q_coalesce(ec, q, is_rxq)) if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
return -EINVAL; return -EINVAL;
return 0; return 0;


@@ -4,6 +4,8 @@
 #ifndef _IDPF_LAN_TXRX_H_
 #define _IDPF_LAN_TXRX_H_
 
+#include <linux/bits.h>
+
 enum idpf_rss_hash {
 	IDPF_HASH_INVALID		= 0,
 	/* Values 1 - 28 are reserved for future use */


@ -4,8 +4,7 @@
#include "idpf.h" #include "idpf.h"
#include "idpf_virtchnl.h" #include "idpf_virtchnl.h"
static const struct net_device_ops idpf_netdev_ops_splitq; static const struct net_device_ops idpf_netdev_ops;
static const struct net_device_ops idpf_netdev_ops_singleq;
/** /**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
@ -69,7 +68,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{ {
clear_bit(IDPF_MB_INTR_MODE, adapter->flags); clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
free_irq(adapter->msix_entries[0].vector, adapter); kfree(free_irq(adapter->msix_entries[0].vector, adapter));
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
} }
@ -124,15 +123,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
*/ */
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{ {
struct idpf_q_vector *mb_vector = &adapter->mb_vector;
int irq_num, mb_vidx = 0, err; int irq_num, mb_vidx = 0, err;
char *name;
irq_num = adapter->msix_entries[mb_vidx].vector; irq_num = adapter->msix_entries[mb_vidx].vector;
mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", name = kasprintf(GFP_KERNEL, "%s-%s-%d",
dev_driver_string(&adapter->pdev->dev), dev_driver_string(&adapter->pdev->dev),
"Mailbox", mb_vidx); "Mailbox", mb_vidx);
err = request_irq(irq_num, adapter->irq_mb_handler, 0, err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
mb_vector->name, adapter);
if (err) { if (err) {
dev_err(&adapter->pdev->dev, dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err); "IRQ request for mailbox failed, error: %d\n", err);
@ -765,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
} }
/* assign netdev_ops */ /* assign netdev_ops */
if (idpf_is_queue_model_split(vport->txq_model)) netdev->netdev_ops = &idpf_netdev_ops;
netdev->netdev_ops = &idpf_netdev_ops_splitq;
else
netdev->netdev_ops = &idpf_netdev_ops_singleq;
/* setup watchdog timeout value to be 5 second */ /* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ; netdev->watchdog_timeo = 5 * HZ;
@ -946,6 +941,9 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
{ {
struct idpf_adapter *adapter = vport->adapter; struct idpf_adapter *adapter = vport->adapter;
kfree(vport->rx_ptype_lkup);
vport->rx_ptype_lkup = NULL;
unregister_netdev(vport->netdev); unregister_netdev(vport->netdev);
free_netdev(vport->netdev); free_netdev(vport->netdev);
vport->netdev = NULL; vport->netdev = NULL;
@ -1318,14 +1316,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
if (idpf_is_queue_model_split(vport->rxq_model)) { if (idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
struct idpf_queue *q = const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq; &grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail); writel(q->next_to_alloc, q->tail);
} }
} else { } else {
for (j = 0; j < grp->singleq.num_rxq; j++) { for (j = 0; j < grp->singleq.num_rxq; j++) {
struct idpf_queue *q = const struct idpf_rx_queue *q =
grp->singleq.rxqs[j]; grp->singleq.rxqs[j];
writel(q->next_to_alloc, q->tail); writel(q->next_to_alloc, q->tail);
@ -1855,7 +1853,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_state current_state = np->state; enum idpf_vport_state current_state = np->state;
struct idpf_adapter *adapter = vport->adapter; struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport; struct idpf_vport *new_vport;
int err, i; int err;
/* If the system is low on memory, we can end up in bad state if we /* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them * free all the memory for queue resources and try to allocate them
@ -1929,46 +1927,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/ */
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps)); memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Since idpf_vport_queues_alloc was called with new_port, the queue
* back pointers are currently pointing to the local new_vport. Reset
* the backpointers to the original vport here
*/
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
int j;
tx_qgrp->vport = vport;
for (j = 0; j < tx_qgrp->num_txq; j++)
tx_qgrp->txqs[j]->vport = vport;
if (idpf_is_queue_model_split(vport->txq_model))
tx_qgrp->complq->vport = vport;
}
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
struct idpf_queue *q;
u16 num_rxq;
int j;
rx_qgrp->vport = vport;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
q->vport = vport;
}
}
if (reset_cause == IDPF_SR_Q_CHANGE) if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport); idpf_vport_alloc_vec_indexes(vport);
@ -2393,24 +2351,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
mem->pa = 0; mem->pa = 0;
} }
static const struct net_device_ops idpf_netdev_ops_splitq = { static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open, .ndo_open = idpf_open,
.ndo_stop = idpf_stop, .ndo_stop = idpf_stop,
.ndo_start_xmit = idpf_tx_splitq_start, .ndo_start_xmit = idpf_tx_start,
.ndo_features_check = idpf_features_check,
.ndo_set_rx_mode = idpf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = idpf_set_mac,
.ndo_change_mtu = idpf_change_mtu,
.ndo_get_stats64 = idpf_get_stats64,
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
};
static const struct net_device_ops idpf_netdev_ops_singleq = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
.ndo_start_xmit = idpf_tx_singleq_start,
.ndo_features_check = idpf_features_check, .ndo_features_check = idpf_features_check,
.ndo_set_rx_mode = idpf_set_rx_mode, .ndo_set_rx_mode = idpf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,


@@ -8,6 +8,7 @@
 #define DRV_SUMMARY	"Intel(R) Infrastructure Data Path Function Linux Driver"
 
 MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBETH);
 MODULE_LICENSE("GPL");
 
 /**


@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */ /* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
#include "idpf.h" #include "idpf.h"
/** /**
@ -186,7 +188,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
* and gets a physical address for each memory location and programs * and gets a physical address for each memory location and programs
* it and the length into the transmit base mode descriptor. * it and the length into the transmit base mode descriptor.
*/ */
static void idpf_tx_singleq_map(struct idpf_queue *tx_q, static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *first, struct idpf_tx_buf *first,
struct idpf_tx_offload_params *offloads) struct idpf_tx_offload_params *offloads)
{ {
@ -205,12 +207,12 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
data_len = skb->data_len; data_len = skb->data_len;
size = skb_headlen(skb); size = skb_headlen(skb);
tx_desc = IDPF_BASE_TX_DESC(tx_q, i); tx_desc = &tx_q->base_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
/* write each descriptor with CRC bit */ /* write each descriptor with CRC bit */
if (tx_q->vport->crc_enable) if (idpf_queue_has(CRC_EN, tx_q))
td_cmd |= IDPF_TX_DESC_CMD_ICRC; td_cmd |= IDPF_TX_DESC_CMD_ICRC;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) { for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@ -239,7 +241,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++; i++;
if (i == tx_q->desc_count) { if (i == tx_q->desc_count) {
tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); tx_desc = &tx_q->base_tx[0];
i = 0; i = 0;
} }
@ -259,7 +261,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++; i++;
if (i == tx_q->desc_count) { if (i == tx_q->desc_count) {
tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); tx_desc = &tx_q->base_tx[0];
i = 0; i = 0;
} }
@ -285,7 +287,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
/* set next_to_watch value indicating a packet is present */ /* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc; first->next_to_watch = tx_desc;
nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount); netdev_tx_sent_queue(nq, first->bytecount);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
@ -299,7 +301,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
* ring entry to reflect that this index is a context descriptor * ring entry to reflect that this index is a context descriptor
*/ */
static struct idpf_base_tx_ctx_desc * static struct idpf_base_tx_ctx_desc *
idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
{ {
struct idpf_base_tx_ctx_desc *ctx_desc; struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use; int ntu = txq->next_to_use;
@ -307,7 +309,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf)); memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
txq->tx_buf[ntu].ctx_entry = true; txq->tx_buf[ntu].ctx_entry = true;
ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu); ctx_desc = &txq->base_ctx[ntu];
IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu); IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
txq->next_to_use = ntu; txq->next_to_use = ntu;
@ -320,7 +322,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
* @txq: queue to send buffer on * @txq: queue to send buffer on
* @offload: offload parameter structure * @offload: offload parameter structure
**/ **/
static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq,
struct idpf_tx_offload_params *offload) struct idpf_tx_offload_params *offload)
{ {
struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq); struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq);
@ -333,7 +335,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss); qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync); u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.tx.lso_pkts); u64_stats_inc(&txq->q_stats.lso_pkts);
u64_stats_update_end(&txq->stats_sync); u64_stats_update_end(&txq->stats_sync);
} }
@ -351,8 +353,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
* *
* Returns NETDEV_TX_OK if sent, else an error code * Returns NETDEV_TX_OK if sent, else an error code
*/ */
static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_queue *tx_q) struct idpf_tx_queue *tx_q)
{ {
struct idpf_tx_offload_params offload = { }; struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first; struct idpf_tx_buf *first;
@ -408,33 +410,6 @@ out_drop:
return idpf_tx_drop_skb(tx_q, skb); return idpf_tx_drop_skb(tx_q, skb);
} }
/**
* idpf_tx_singleq_start - Selects the right Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
struct net_device *netdev)
{
struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
struct idpf_queue *tx_q;
tx_q = vport->txqs[skb_get_queue_mapping(skb)];
/* hardware can't handle really short frames, hardware padding works
* beyond this point
*/
if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_OK;
}
return idpf_tx_singleq_frame(skb, tx_q);
}
/** /**
* idpf_tx_singleq_clean - Reclaim resources from queue * idpf_tx_singleq_clean - Reclaim resources from queue
* @tx_q: Tx queue to clean * @tx_q: Tx queue to clean
@ -442,20 +417,19 @@ netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
* @cleaned: returns number of packets cleaned * @cleaned: returns number of packets cleaned
* *
*/ */
static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned) int *cleaned)
{ {
unsigned int budget = tx_q->vport->compln_clean_budget;
unsigned int total_bytes = 0, total_pkts = 0; unsigned int total_bytes = 0, total_pkts = 0;
struct idpf_base_tx_desc *tx_desc; struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean; s16 ntc = tx_q->next_to_clean;
struct idpf_netdev_priv *np; struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf; struct idpf_tx_buf *tx_buf;
struct idpf_vport *vport;
struct netdev_queue *nq; struct netdev_queue *nq;
bool dont_wake; bool dont_wake;
tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc); tx_desc = &tx_q->base_tx[ntc];
tx_buf = &tx_q->tx_buf[ntc]; tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count; ntc -= tx_q->desc_count;
@ -517,7 +491,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
if (unlikely(!ntc)) { if (unlikely(!ntc)) {
ntc -= tx_q->desc_count; ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf; tx_buf = tx_q->tx_buf;
tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); tx_desc = &tx_q->base_tx[0];
} }
/* unmap any remaining paged data */ /* unmap any remaining paged data */
@ -540,7 +514,7 @@ fetch_next_txq_desc:
if (unlikely(!ntc)) { if (unlikely(!ntc)) {
ntc -= tx_q->desc_count; ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf; tx_buf = tx_q->tx_buf;
tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); tx_desc = &tx_q->base_tx[0];
} }
} while (likely(budget)); } while (likely(budget));
@ -550,16 +524,15 @@ fetch_next_txq_desc:
*cleaned += total_pkts; *cleaned += total_pkts;
u64_stats_update_begin(&tx_q->stats_sync); u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts); u64_stats_add(&tx_q->q_stats.packets, total_pkts);
u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes); u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
u64_stats_update_end(&tx_q->stats_sync); u64_stats_update_end(&tx_q->stats_sync);
vport = tx_q->vport; np = netdev_priv(tx_q->netdev);
np = netdev_priv(vport->netdev); nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
nq = netdev_get_tx_queue(vport->netdev, tx_q->idx);
dont_wake = np->state != __IDPF_VPORT_UP || dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(vport->netdev); !netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, total_pkts, total_bytes, __netif_txq_completed_wake(nq, total_pkts, total_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake); dont_wake);
@ -584,7 +557,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
budget_per_q = num_txq ? max(budget / num_txq, 1) : 0; budget_per_q = num_txq ? max(budget / num_txq, 1) : 0;
for (i = 0; i < num_txq; i++) { for (i = 0; i < num_txq; i++) {
struct idpf_queue *q; struct idpf_tx_queue *q;
q = q_vec->tx[i]; q = q_vec->tx[i];
clean_complete &= idpf_tx_singleq_clean(q, budget_per_q, clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
@ -614,14 +587,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc,
/** /**
* idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers
* @rxq: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer * @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
* @ntc: next to clean
*/ */
static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
union virtchnl2_rx_desc *rx_desc,
struct sk_buff *skb, u16 ntc)
{ {
/* if we are the last buffer then there is nothing else to do */ /* if we are the last buffer then there is nothing else to do */
if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ))) if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ)))
@ -635,98 +603,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
* @rxq: Rx ring being processed * @rxq: Rx ring being processed
* @skb: skb currently being received and modified * @skb: skb currently being received and modified
* @csum_bits: checksum bits from descriptor * @csum_bits: checksum bits from descriptor
* @ptype: the packet type decoded by hardware * @decoded: the packet type decoded by hardware
* *
* skb->protocol must be set before this function is called * skb->protocol must be set before this function is called
*/ */
static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb, static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
struct idpf_rx_csum_decoded *csum_bits, struct sk_buff *skb,
u16 ptype) struct idpf_rx_csum_decoded csum_bits,
struct libeth_rx_pt decoded)
{ {
struct idpf_rx_ptype_decoded decoded;
bool ipv4, ipv6; bool ipv4, ipv6;
/* check if Rx checksum is enabled */ /* check if Rx checksum is enabled */
if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM))) if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
return; return;
/* check if HW has decoded the packet and checksum */ /* check if HW has decoded the packet and checksum */
if (unlikely(!(csum_bits->l3l4p))) if (unlikely(!csum_bits.l3l4p))
return; return;
decoded = rxq->vport->rx_ptype_lkup[ptype]; ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
if (unlikely(!(decoded.known && decoded.outer_ip))) ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
return;
ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4);
ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6);
/* Check if there were any checksum errors */ /* Check if there were any checksum errors */
if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe))) if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail; goto checksum_fail;
/* Device could not do any checksum offload for certain extension /* Device could not do any checksum offload for certain extension
* headers as indicated by setting IPV6EXADD bit * headers as indicated by setting IPV6EXADD bit
*/ */
if (unlikely(ipv6 && csum_bits->ipv6exadd)) if (unlikely(ipv6 && csum_bits.ipv6exadd))
return; return;
/* check for L4 errors and handle packets that were not able to be /* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed * checksummed due to arrival speed
*/ */
if (unlikely(csum_bits->l4e)) if (unlikely(csum_bits.l4e))
goto checksum_fail; goto checksum_fail;
if (unlikely(csum_bits->nat && csum_bits->eudpe)) if (unlikely(csum_bits.nat && csum_bits.eudpe))
goto checksum_fail; goto checksum_fail;
/* Handle packets that were not able to be checksummed due to arrival /* Handle packets that were not able to be checksummed due to arrival
* speed, in this case the stack can compute the csum. * speed, in this case the stack can compute the csum.
*/ */
if (unlikely(csum_bits->pprs)) if (unlikely(csum_bits.pprs))
return; return;
/* If there is an outer header present that might contain a checksum /* If there is an outer header present that might contain a checksum
* we need to bump the checksum level by 1 to reflect the fact that * we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum. * we are indicating we validated the inner checksum.
*/ */
if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT) if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1; skb->csum_level = 1;
/* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case IDPF_RX_PTYPE_INNER_PROT_ICMP:
case IDPF_RX_PTYPE_INNER_PROT_TCP:
case IDPF_RX_PTYPE_INNER_PROT_UDP:
case IDPF_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
return; return;
default:
return;
}
checksum_fail: checksum_fail:
u64_stats_update_begin(&rxq->stats_sync); u64_stats_update_begin(&rxq->stats_sync);
u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync); u64_stats_update_end(&rxq->stats_sync);
} }
/** /**
* idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum
* @rx_q: Rx completion queue
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor * @rx_desc: the receive descriptor
* @ptype: Rx packet type
* *
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format. * descriptor writeback format.
*
* Return: parsed checksum status.
**/ **/
static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, static struct idpf_rx_csum_decoded
struct sk_buff *skb, idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
union virtchnl2_rx_desc *rx_desc,
u16 ptype)
{ {
struct idpf_rx_csum_decoded csum_bits; struct idpf_rx_csum_decoded csum_bits = { };
u32 rx_error, rx_status; u32 rx_error, rx_status;
u64 qword; u64 qword;
@ -745,28 +697,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
rx_status); rx_status);
csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M, csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M,
rx_status); rx_status);
csum_bits.nat = 0;
csum_bits.eudpe = 0;
idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); return csum_bits;
} }
/** /**
* idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum
* @rx_q: Rx completion queue
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor * @rx_desc: the receive descriptor
* @ptype: Rx packet type
* *
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format. * descriptor writeback format.
*
* Return: parsed checksum status.
**/ **/
static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, static struct idpf_rx_csum_decoded
struct sk_buff *skb, idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
union virtchnl2_rx_desc *rx_desc,
u16 ptype)
{ {
struct idpf_rx_csum_decoded csum_bits; struct idpf_rx_csum_decoded csum_bits = { };
u16 rx_status0, rx_status1; u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0); rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@ -786,9 +733,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
rx_status0); rx_status0);
csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M, csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M,
rx_status1); rx_status1);
csum_bits.pprs = 0;
idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); return csum_bits;
} }
/** /**
@ -801,14 +747,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format. * descriptor writeback format.
**/ **/
static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb, struct sk_buff *skb,
union virtchnl2_rx_desc *rx_desc, const union virtchnl2_rx_desc *rx_desc,
struct idpf_rx_ptype_decoded *decoded) struct libeth_rx_pt decoded)
{ {
u64 mask, qw1; u64 mask, qw1;
if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return; return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M; mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@ -817,7 +763,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
if (FIELD_GET(mask, qw1) == mask) { if (FIELD_GET(mask, qw1) == mask) {
u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss); u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss);
skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); libeth_rx_pt_set_hash(skb, hash, decoded);
} }
} }
@ -831,18 +777,20 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format. * descriptor writeback format.
**/ **/
static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb, struct sk_buff *skb,
union virtchnl2_rx_desc *rx_desc, const union virtchnl2_rx_desc *rx_desc,
struct idpf_rx_ptype_decoded *decoded) struct libeth_rx_pt decoded)
{ {
if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return; return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M, if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) {
skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash), u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash);
idpf_ptype_to_htype(decoded));
libeth_rx_pt_set_hash(skb, hash, decoded);
}
} }
/** /**
@ -857,25 +805,45 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
* order to populate the hash, checksum, VLAN, protocol, and * order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb. * other fields within the skb.
*/ */
static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, static void
idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
struct sk_buff *skb, struct sk_buff *skb,
union virtchnl2_rx_desc *rx_desc, const union virtchnl2_rx_desc *rx_desc,
u16 ptype) u16 ptype)
{ {
struct idpf_rx_ptype_decoded decoded = struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
rx_q->vport->rx_ptype_lkup[ptype]; struct idpf_rx_csum_decoded csum_bits;
/* modifies the skb - consumes the enet header */ /* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_q->vport->netdev); skb->protocol = eth_type_trans(skb, rx_q->netdev);
/* Check if we're using base mode descriptor IDs */ /* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) { if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded); idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype); csum_bits = idpf_rx_singleq_base_csum(rx_desc);
} else { } else {
idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded); idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded);
idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype); csum_bits = idpf_rx_singleq_flex_csum(rx_desc);
} }
idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
skb_record_rx_queue(skb, rx_q->idx);
}
/**
* idpf_rx_buf_hw_update - Store the new tail and head values
* @rxq: queue to bump
* @val: new head index
*/
static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val)
{
rxq->next_to_use = val;
if (unlikely(!rxq->tail))
return;
/* writel has an implicit memory barrier */
writel(val, rxq->tail);
} }
/** /**
@ -885,24 +853,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
* *
* Returns false if all allocations were successful, true if any fail * Returns false if all allocations were successful, true if any fail
*/ */
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
u16 cleaned_count) u16 cleaned_count)
{ {
struct virtchnl2_singleq_rx_buf_desc *desc; struct virtchnl2_singleq_rx_buf_desc *desc;
const struct libeth_fq_fp fq = {
.pp = rx_q->pp,
.fqes = rx_q->rx_buf,
.truesize = rx_q->truesize,
.count = rx_q->desc_count,
};
u16 nta = rx_q->next_to_alloc; u16 nta = rx_q->next_to_alloc;
struct idpf_rx_buf *buf;
if (!cleaned_count) if (!cleaned_count)
return false; return false;
desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta); desc = &rx_q->single_buf[nta];
buf = &rx_q->rx_buf.buf[nta];
do { do {
dma_addr_t addr; dma_addr_t addr;
addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size); addr = libeth_rx_alloc(&fq, nta);
if (unlikely(addr == DMA_MAPPING_ERROR)) if (addr == DMA_MAPPING_ERROR)
break; break;
/* Refresh the desc even if buffer_addrs didn't change /* Refresh the desc even if buffer_addrs didn't change
@ -912,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
desc->hdr_addr = 0; desc->hdr_addr = 0;
desc++; desc++;
buf++;
nta++; nta++;
if (unlikely(nta == rx_q->desc_count)) { if (unlikely(nta == rx_q->desc_count)) {
desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0); desc = &rx_q->single_buf[0];
buf = rx_q->rx_buf.buf;
nta = 0; nta = 0;
} }
@ -933,7 +903,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
/** /**
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process * @rx_desc: the descriptor to process
* @fields: storage for extracted values * @fields: storage for extracted values
* *
@ -943,8 +912,8 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format. * descriptor writeback format.
*/ */
static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, static void
union virtchnl2_rx_desc *rx_desc, idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
struct idpf_rx_extracted *fields) struct idpf_rx_extracted *fields)
{ {
u64 qword; u64 qword;
@ -957,7 +926,6 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
/** /**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process * @rx_desc: the descriptor to process
* @fields: storage for extracted values * @fields: storage for extracted values
* *
@ -967,8 +935,8 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format. * descriptor writeback format.
*/ */
static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, static void
union virtchnl2_rx_desc *rx_desc, idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
struct idpf_rx_extracted *fields) struct idpf_rx_extracted *fields)
{ {
fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
@ -984,14 +952,15 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
* @fields: storage for extracted values * @fields: storage for extracted values
* *
*/ */
static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, static void
union virtchnl2_rx_desc *rx_desc, idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
struct idpf_rx_extracted *fields) struct idpf_rx_extracted *fields)
{ {
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields); idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else else
idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields); idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
} }
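The flex extractor above pulls sub-fields out of a little-endian descriptor word with GENMASK()/FIELD_GET(). A small self-contained model of that pattern, with simplified stand-in macros and made-up field positions (the real masks live in virtchnl2_lan_desc.h):

#include <stdint.h>
#include <stdio.h>

#define TOY_GENMASK(h, l) \
	(((~0ull) << (l)) & (~0ull >> (63 - (h))))
/* FIELD_GET(): mask the field, then shift it down by the mask's lowest set bit. */
#define TOY_FIELD_GET(mask, val) \
	(((val) & (mask)) / ((mask) & -(mask)))

#define TOY_PKT_LEN_M	TOY_GENMASK(13, 0)	/* assumed 14-bit packet length */
#define TOY_PTYPE_M	TOY_GENMASK(23, 14)	/* assumed 10-bit ptype */

int main(void)
{
	uint32_t qword = (0x2a << 14) | 1514;	/* ptype 42, length 1514 */

	printf("size  = %llu\n",
	       (unsigned long long)TOY_FIELD_GET(TOY_PKT_LEN_M, qword));
	printf("ptype = %llu\n",
	       (unsigned long long)TOY_FIELD_GET(TOY_PTYPE_M, qword));
	return 0;
}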
/** /**
@ -1001,7 +970,7 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
* *
* Returns true if there's any budget left (e.g. the clean is finished) * Returns true if there's any budget left (e.g. the clean is finished)
*/ */
static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{ {
unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
struct sk_buff *skb = rx_q->skb; struct sk_buff *skb = rx_q->skb;
@ -1016,7 +985,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
struct idpf_rx_buf *rx_buf; struct idpf_rx_buf *rx_buf;
/* get the Rx desc from Rx queue based on 'next_to_clean' */ /* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = IDPF_RX_DESC(rx_q, ntc); rx_desc = &rx_q->rx[ntc];
/* status_error_ptype_len will always be zero for unused /* status_error_ptype_len will always be zero for unused
* descriptors because it's cleared in cleanup, and overlaps * descriptors because it's cleared in cleanup, and overlaps
@ -1036,29 +1005,27 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf.buf[ntc]; rx_buf = &rx_q->rx_buf[ntc];
if (!fields.size) { if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
idpf_rx_put_page(rx_buf);
goto skip_data; goto skip_data;
}
idpf_rx_sync_for_cpu(rx_buf, fields.size);
if (skb) if (skb)
idpf_rx_add_frag(rx_buf, skb, fields.size); idpf_rx_add_frag(rx_buf, skb, fields.size);
else else
skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size); skb = idpf_rx_build_skb(rx_buf, fields.size);
/* exit if we failed to retrieve a buffer */ /* exit if we failed to retrieve a buffer */
if (!skb) if (!skb)
break; break;
skip_data: skip_data:
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); rx_buf->page = NULL;
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++; cleaned_count++;
/* skip if it is non EOP desc */ /* skip if it is non EOP desc */
if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc)) if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
continue; continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \ #define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
@ -1084,7 +1051,7 @@ skip_data:
rx_desc, fields.rx_ptype); rx_desc, fields.rx_ptype);
/* send completed skb up the stack */ /* send completed skb up the stack */
napi_gro_receive(&rx_q->q_vector->napi, skb); napi_gro_receive(rx_q->pp->p.napi, skb);
skb = NULL; skb = NULL;
/* update budget accounting */ /* update budget accounting */
@ -1095,12 +1062,13 @@ skip_data:
rx_q->next_to_clean = ntc; rx_q->next_to_clean = ntc;
page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count) if (cleaned_count)
failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count); failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync); u64_stats_update_begin(&rx_q->stats_sync);
u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts); u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes); u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
u64_stats_update_end(&rx_q->stats_sync); u64_stats_update_end(&rx_q->stats_sync);
/* guarantee a trip back through this routine if there was a failure */ /* guarantee a trip back through this routine if there was a failure */
@ -1127,7 +1095,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
*/ */
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) { for (i = 0; i < num_rxq; i++) {
struct idpf_queue *rxq = q_vec->rx[i]; struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q; int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q); pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);

File diff suppressed because it is too large

@ -4,10 +4,13 @@
#ifndef _IDPF_TXRX_H_ #ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_ #define _IDPF_TXRX_H_
#include <net/page_pool/helpers.h> #include <linux/dim.h>
#include <net/libeth/cache.h>
#include <net/tcp.h> #include <net/tcp.h>
#include <net/netdev_queues.h> #include <net/netdev_queues.h>
#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h" #include "virtchnl2_lan_desc.h"
#define IDPF_LARGE_MAX_Q 256 #define IDPF_LARGE_MAX_Q 256
@ -83,7 +86,7 @@
do { \ do { \
if (unlikely(++(ntc) == (rxq)->desc_count)) { \ if (unlikely(++(ntc) == (rxq)->desc_count)) { \
ntc = 0; \ ntc = 0; \
change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \ idpf_queue_change(GEN_CHK, rxq); \
} \ } \
} while (0) } while (0)
@ -93,16 +96,10 @@ do { \
idx = 0; \ idx = 0; \
} while (0) } while (0)
#define IDPF_RX_HDR_SIZE 256
#define IDPF_RX_BUF_2048 2048
#define IDPF_RX_BUF_4096 4096
#define IDPF_RX_BUF_STRIDE 32 #define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16 #define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64 #define IDPF_LOW_WATERMARK 64
/* Size of header buffer specifically for header split */
#define IDPF_HDR_BUF_SIZE 256
#define IDPF_PACKET_HDR_PAD \
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
#define IDPF_TX_TSO_MIN_MSS 88 #define IDPF_TX_TSO_MIN_MSS 88
/* Minimum number of descriptors between 2 descriptors with the RE bit set; /* Minimum number of descriptors between 2 descriptors with the RE bit set;
@ -110,36 +107,17 @@ do { \
*/ */
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64 #define IDPF_TX_SPLITQ_RE_MIN_GAP 64
#define IDPF_RX_BI_BUFID_S 0 #define IDPF_RX_BI_GEN_M BIT(16)
#define IDPF_RX_BI_BUFID_M GENMASK(14, 0) #define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RX_BI_GEN_S 15
#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M #define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M #define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
(&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
(&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
#define IDPF_BASE_TX_DESC(txq, i) \
(&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
#define IDPF_BASE_TX_CTX_DESC(txq, i) \
(&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
(&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
#define IDPF_FLEX_TX_DESC(txq, i) \
(&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
(&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
#define IDPF_DESC_UNUSED(txq) \ #define IDPF_DESC_UNUSED(txq) \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \ ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1) (txq)->next_to_clean - (txq)->next_to_use - 1)
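A worked example of the IDPF_DESC_UNUSED() arithmetic: it returns the number of free descriptors while keeping one slot reserved so next_to_use can never catch up with next_to_clean. The values below are made up purely to exercise the formula:

#include <stdio.h>

static unsigned int desc_unused(unsigned int ntc, unsigned int ntu,
				unsigned int desc_count)
{
	return ((ntc > ntu) ? 0 : desc_count) + ntc - ntu - 1;
}

int main(void)
{
	/* clean pointer ahead of use pointer: free space is the gap minus 1 */
	printf("%u\n", desc_unused(100, 40, 256));	/* 59 */
	/* use pointer ahead: free space wraps around the end of the ring */
	printf("%u\n", desc_unused(10, 200, 256));	/* 65 */
	/* ring completely clean: one slot stays reserved */
	printf("%u\n", desc_unused(0, 0, 256));		/* 255 */
	return 0;
}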
#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top) #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ #define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
(txq)->desc_count >> 2) (txq)->desc_count >> 2)
@ -315,16 +293,7 @@ struct idpf_rx_extracted {
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \ #define IDPF_TX_MAX_DESC_DATA_ALIGNED \
ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE) ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
#define IDPF_RX_DMA_ATTR \ #define idpf_rx_buf libeth_fqe
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#define IDPF_RX_DESC(rxq, i) \
(&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
struct idpf_rx_buf {
struct page *page;
unsigned int page_offset;
u16 truesize;
};
#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32 #define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \ #define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
@ -348,72 +317,6 @@ struct idpf_rx_buf {
#define IDPF_RX_MAX_BASE_PTYPE 256 #define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF #define IDPF_INVALID_PTYPE_ID 0xFFFF
/* Packet type non-ip values */
enum idpf_rx_ptype_l2 {
IDPF_RX_PTYPE_L2_RESERVED = 0,
IDPF_RX_PTYPE_L2_MAC_PAY2 = 1,
IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
IDPF_RX_PTYPE_L2_FIP_PAY2 = 3,
IDPF_RX_PTYPE_L2_OUI_PAY2 = 4,
IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6,
IDPF_RX_PTYPE_L2_ECP_PAY2 = 7,
IDPF_RX_PTYPE_L2_EVB_PAY2 = 8,
IDPF_RX_PTYPE_L2_QCN_PAY2 = 9,
IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
IDPF_RX_PTYPE_L2_ARP = 11,
};
enum idpf_rx_ptype_outer_ip {
IDPF_RX_PTYPE_OUTER_L2 = 0,
IDPF_RX_PTYPE_OUTER_IP = 1,
};
#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \
(((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
((ptype)->outer_ip_ver == (ipv)))
enum idpf_rx_ptype_outer_ip_ver {
IDPF_RX_PTYPE_OUTER_NONE = 0,
IDPF_RX_PTYPE_OUTER_IPV4 = 1,
IDPF_RX_PTYPE_OUTER_IPV6 = 2,
};
enum idpf_rx_ptype_outer_fragmented {
IDPF_RX_PTYPE_NOT_FRAG = 0,
IDPF_RX_PTYPE_FRAG = 1,
};
enum idpf_rx_ptype_tunnel_type {
IDPF_RX_PTYPE_TUNNEL_NONE = 0,
IDPF_RX_PTYPE_TUNNEL_IP_IP = 1,
IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};
enum idpf_rx_ptype_tunnel_end_prot {
IDPF_RX_PTYPE_TUNNEL_END_NONE = 0,
IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};
enum idpf_rx_ptype_inner_prot {
IDPF_RX_PTYPE_INNER_PROT_NONE = 0,
IDPF_RX_PTYPE_INNER_PROT_UDP = 1,
IDPF_RX_PTYPE_INNER_PROT_TCP = 2,
IDPF_RX_PTYPE_INNER_PROT_SCTP = 3,
IDPF_RX_PTYPE_INNER_PROT_ICMP = 4,
IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
};
enum idpf_rx_ptype_payload_layer {
IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
enum idpf_tunnel_state { enum idpf_tunnel_state {
IDPF_PTYPE_TUNNEL_IP = BIT(0), IDPF_PTYPE_TUNNEL_IP = BIT(0),
IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1), IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
@ -421,22 +324,9 @@ enum idpf_tunnel_state {
}; };
struct idpf_ptype_state { struct idpf_ptype_state {
bool outer_ip; bool outer_ip:1;
bool outer_frag; bool outer_frag:1;
u8 tunnel_state; u8 tunnel_state:6;
};
struct idpf_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:2;
u32 outer_frag:1;
u32 tunnel_type:3;
u32 tunnel_end_prot:2;
u32 tunnel_end_frag:1;
u32 inner_prot:4;
u32 payload_layer:3;
}; };
/** /**
@ -452,23 +342,37 @@ struct idpf_rx_ptype_decoded {
* to 1 and knows that reading a gen bit of 1 in any * to 1 and knows that reading a gen bit of 1 in any
* descriptor on the initial pass of the ring indicates a * descriptor on the initial pass of the ring indicates a
* writeback. It also flips on every ring wrap. * writeback. It also flips on every ring wrap.
* @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
* and RFLGQ_GEN is the SW bit. * bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_POLL_MODE: Enable poll mode * @__IDPF_Q_POLL_MODE: Enable poll mode
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_FLAGS_NBITS: Must be last * @__IDPF_Q_FLAGS_NBITS: Must be last
*/ */
enum idpf_queue_flags_t { enum idpf_queue_flags_t {
__IDPF_Q_GEN_CHK, __IDPF_Q_GEN_CHK,
__IDPF_RFLQ_GEN_CHK, __IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN, __IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER, __IDPF_Q_SW_MARKER,
__IDPF_Q_POLL_MODE, __IDPF_Q_POLL_MODE,
__IDPF_Q_CRC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_FLAGS_NBITS, __IDPF_Q_FLAGS_NBITS,
}; };
#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has_clear(f, q) \
__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v) \
__assign_bit(__IDPF_Q_##f, (q)->flags, v)
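The new helpers token-paste the flag name onto the __IDPF_Q_ prefix, so call sites read as idpf_queue_set(GEN_CHK, q) rather than open-coded set_bit() calls. A compact userspace model of the same idea, with simplified single-word bit helpers standing in for the kernel's bitmap ops:

#include <stdio.h>

enum toy_queue_flags {
	__TOY_Q_GEN_CHK,
	__TOY_Q_FLOW_SCH_EN,
	__TOY_Q_HSPLIT_EN,
	__TOY_Q_FLAGS_NBITS,
};

struct toy_queue {
	unsigned long flags;
};

#define toy_queue_set(f, q)	((q)->flags |= 1UL << __TOY_Q_##f)
#define toy_queue_clear(f, q)	((q)->flags &= ~(1UL << __TOY_Q_##f))
#define toy_queue_change(f, q)	((q)->flags ^= 1UL << __TOY_Q_##f)
#define toy_queue_has(f, q)	(!!((q)->flags & (1UL << __TOY_Q_##f)))

int main(void)
{
	struct toy_queue q = { 0 };

	toy_queue_set(HSPLIT_EN, &q);
	toy_queue_change(GEN_CHK, &q);		/* flips on every ring wrap */

	printf("hsplit=%d gen=%d flow=%d\n",
	       toy_queue_has(HSPLIT_EN, &q),
	       toy_queue_has(GEN_CHK, &q),
	       toy_queue_has(FLOW_SCH_EN, &q));
	return 0;
}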
/** /**
* struct idpf_vec_regs * struct idpf_vec_regs
* @dyn_ctl_reg: Dynamic control interrupt register offset * @dyn_ctl_reg: Dynamic control interrupt register offset
@ -509,54 +413,68 @@ struct idpf_intr_reg {
/** /**
* struct idpf_q_vector * struct idpf_q_vector
* @vport: Vport back pointer * @vport: Vport back pointer
* @affinity_mask: CPU affinity mask * @num_rxq: Number of RX queues
* @napi: napi handler
* @v_idx: Vector index
* @intr_reg: See struct idpf_intr_reg
* @num_txq: Number of TX queues * @num_txq: Number of TX queues
* @num_bufq: Number of buffer queues
* @num_complq: number of completion queues
* @rx: Array of RX queues to service
* @tx: Array of TX queues to service * @tx: Array of TX queues to service
* @bufq: Array of buffer queues to service
* @complq: array of completion queues
* @intr_reg: See struct idpf_intr_reg
* @napi: napi handler
* @total_events: Number of interrupts processed
* @tx_dim: Data for TX net_dim algorithm * @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate * @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not * @tx_intr_mode: Dynamic ITR or not
* @tx_itr_idx: TX ITR index * @tx_itr_idx: TX ITR index
* @num_rxq: Number of RX queues
* @rx: Array of RX queues to service
* @rx_dim: Data for RX net_dim algorithm * @rx_dim: Data for RX net_dim algorithm
* @rx_itr_value: RX interrupt throttling rate * @rx_itr_value: RX interrupt throttling rate
* @rx_intr_mode: Dynamic ITR or not * @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index * @rx_itr_idx: RX ITR index
* @num_bufq: Number of buffer queues * @v_idx: Vector index
* @bufq: Array of buffer queues to service * @affinity_mask: CPU affinity mask
* @total_events: Number of interrupts processed
* @name: Queue vector name
*/ */
struct idpf_q_vector { struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
struct idpf_vport *vport; struct idpf_vport *vport;
cpumask_t affinity_mask;
struct napi_struct napi;
u16 v_idx;
struct idpf_intr_reg intr_reg;
u16 num_rxq;
u16 num_txq; u16 num_txq;
struct idpf_queue **tx; u16 num_bufq;
u16 num_complq;
struct idpf_rx_queue **rx;
struct idpf_tx_queue **tx;
struct idpf_buf_queue **bufq;
struct idpf_compl_queue **complq;
struct idpf_intr_reg intr_reg;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
struct napi_struct napi;
u16 total_events;
struct dim tx_dim; struct dim tx_dim;
u16 tx_itr_value; u16 tx_itr_value;
bool tx_intr_mode; bool tx_intr_mode;
u32 tx_itr_idx; u32 tx_itr_idx;
u16 num_rxq;
struct idpf_queue **rx;
struct dim rx_dim; struct dim rx_dim;
u16 rx_itr_value; u16 rx_itr_value;
bool rx_intr_mode; bool rx_intr_mode;
u32 rx_itr_idx; u32 rx_itr_idx;
__cacheline_group_end_aligned(read_write);
u16 num_bufq; __cacheline_group_begin_aligned(cold);
struct idpf_queue **bufq; u16 v_idx;
u16 total_events; cpumask_var_t affinity_mask;
char *name; __cacheline_group_end_aligned(cold);
}; };
libeth_cacheline_set_assert(struct idpf_q_vector, 104,
424 + 2 * sizeof(struct dim),
8 + sizeof(cpumask_var_t));
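Conceptually, the libeth assertion above pins the size of each cacheline group so a new field cannot silently push hot data onto another cacheline. A userspace sketch of the same check using offsetof() and static_assert(); the structure, padding and sizes here are invented for illustration and are not the driver's layout:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct toy_vector {
	/* read_mostly group: set at init, only read on the hot path */
	uint64_t vport;			/* stand-in for a pointer */
	uint64_t rx;
	uint64_t tx;
	uint16_t num_rxq;
	uint16_t num_txq;
	uint8_t  pad_ro[36];		/* pad the group to one cacheline */

	/* read_write group: written on every interrupt / NAPI poll */
	uint32_t total_events;
	uint32_t itr_value;
	uint8_t  pad_rw[56];

	/* cold group: configuration-time only */
	uint32_t v_idx;
};

#define group_size(type, first, pad) \
	(offsetof(type, pad) + sizeof(((type *)0)->pad) - offsetof(type, first))

static_assert(group_size(struct toy_vector, vport, pad_ro) == 64,
	      "read_mostly group must fit in one 64-byte cacheline");
static_assert(group_size(struct toy_vector, total_events, pad_rw) == 64,
	      "read_write group must fit in one 64-byte cacheline");

int main(void) { return 0; }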
struct idpf_rx_queue_stats { struct idpf_rx_queue_stats {
u64_stats_t packets; u64_stats_t packets;
@ -583,11 +501,6 @@ struct idpf_cleaned_stats {
u32 bytes; u32 bytes;
}; };
union idpf_queue_stats {
struct idpf_rx_queue_stats rx;
struct idpf_tx_queue_stats tx;
};
#define IDPF_ITR_DYNAMIC 1 #define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0 #define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032 #define IDPF_ITR_20K 0x0032
@ -603,68 +516,123 @@ union idpf_queue_stats {
#define IDPF_DIM_DEFAULT_PROFILE_IX 1 #define IDPF_DIM_DEFAULT_PROFILE_IX 1
/** /**
* struct idpf_queue * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
* @dev: Device back pointer for DMA mapping * @buf_stack: Stack of empty buffers to store buffer info for out of order
* @vport: Back pointer to associated vport * buffer completions. See struct idpf_buf_lifo
* @txq_grp: See struct idpf_txq_group * @sched_buf_hash: Hash table to store buffers
* @rxq_grp: See struct idpf_rxq_group */
* @idx: For buffer queue, it is used as group id, either 0 or 1. On clean, struct idpf_txq_stash {
* buffer queue uses this index to determine which group of refill queues struct idpf_buf_lifo buf_stack;
* to clean. DECLARE_HASHTABLE(sched_buf_hash, 12);
* For TX queue, it is used as index to map between TX queue group and } ____cacheline_aligned;
* hot path TX pointers stored in vport. Used in both singleq/splitq.
* For RX queue, it is used to index to total RX queue across groups and /**
* used for skb reporting. * struct idpf_rx_queue - software structure representing a receive queue
* @tail: Tail offset. Used for both queue models single and split. In splitq * @rx: universal receive descriptor array
* model relevant only for TX queue and RX queue. * @single_buf: buffer descriptor array in singleq
* @tx_buf: See struct idpf_tx_buf * @desc_ring: virtual descriptor ring address
* @rx_buf: Struct with RX buffer related members * @bufq_sets: Pointer to the array of buffer queues in splitq mode
* @rx_buf.buf: See struct idpf_rx_buf * @napi: NAPI instance corresponding to this queue (splitq)
* @rx_buf.hdr_buf_pa: DMA handle * @rx_buf: See struct &libeth_fqe
* @rx_buf.hdr_buf_va: Virtual address * @pp: Page pool pointer in singleq mode
* @pp: Page pool pointer * @netdev: &net_device corresponding to this queue
* @skb: Pointer to the skb * @tail: Tail offset. Used for both queue models single and split.
* @q_type: Queue type (TX, RX, TX completion, RX buffer)
* @q_id: Queue id
* @desc_count: Number of descriptors
* @next_to_use: Next descriptor to use. Relevant in both split & single txq
* and bufq.
* @next_to_clean: Next descriptor to clean. In split queue model, only
* relevant to TX completion queue and RX queue.
* @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
* only relevant to RX queue.
* @flags: See enum idpf_queue_flags_t * @flags: See enum idpf_queue_flags_t
* @q_stats: See union idpf_queue_stats * @idx: For RX queue, it is used to index to total RX queue across groups and
* used for skb reporting.
* @desc_count: Number of descriptors
* @rxdids: Supported RX descriptor ids
* @rx_ptype_lkup: LUT of Rx ptypes
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
* @skb: Pointer to the skb
* @truesize: data buffer truesize in singleq
* @stats_sync: See struct u64_stats_sync * @stats_sync: See struct u64_stats_sync
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on * @q_stats: See union idpf_rx_queue_stats
* the TX completion queue, it can be for any TXQ associated * @q_id: Queue id
* with that completion queue. This means we can clean up to * @size: Length of descriptor ring in bytes
* N TXQs during a single call to clean the completion queue. * @dma: Physical address of ring
* cleaned_bytes|pkts tracks the clean stats per TXQ during * @q_vector: Backreference to associated vector
* that single call to clean the completion queue. By doing so, * @rx_buffer_low_watermark: RX buffer low watermark
* we can update BQL with aggregate cleaned stats for each TXQ
* only once at the end of the cleaning routine.
* @cleaned_pkts: Number of packets cleaned for the above said case
* @rx_hsplit_en: RX headsplit enable
* @rx_hbuf_size: Header buffer size * @rx_hbuf_size: Header buffer size
* @rx_buf_size: Buffer size * @rx_buf_size: Buffer size
* @rx_max_pkt_size: RX max packet size * @rx_max_pkt_size: RX max packet size
* @rx_buf_stride: RX buffer stride */
* @rx_buffer_low_watermark: RX buffer low watermark struct idpf_rx_queue {
* @rxdids: Supported RX descriptor ids __cacheline_group_begin_aligned(read_mostly);
* @q_vector: Backreference to associated vector union {
* @size: Length of descriptor ring in bytes union virtchnl2_rx_desc *rx;
* @dma: Physical address of ring struct virtchnl2_singleq_rx_buf_desc *single_buf;
* @desc_ring: Descriptor ring memory
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather void *desc_ring;
};
union {
struct {
struct idpf_bufq_set *bufq_sets;
struct napi_struct *napi;
};
struct {
struct libeth_fqe *rx_buf;
struct page_pool *pp;
};
};
struct net_device *netdev;
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
u32 rxdids;
const struct libeth_rx_pt *rx_ptype_lkup;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u16 next_to_use;
u16 next_to_clean;
u16 next_to_alloc;
struct sk_buff *skb;
u32 truesize;
struct u64_stats_sync stats_sync;
struct idpf_rx_queue_stats q_stats;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
u16 rx_buffer_low_watermark;
u16 rx_hbuf_size;
u16 rx_buf_size;
u16 rx_max_pkt_size;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
80 + sizeof(struct u64_stats_sync),
32);
/**
* struct idpf_tx_queue - software structure representing a transmit queue
* @base_tx: base Tx descriptor array
* @base_ctx: base Tx context descriptor array
* @flex_tx: flex Tx descriptor array
* @flex_ctx: flex Tx context descriptor array
* @desc_ring: virtual descriptor ring address
* @tx_buf: See struct idpf_tx_buf
* @txq_grp: See struct idpf_txq_group
* @dev: Device back pointer for DMA mapping
* @tail: Tail offset. Used for both queue models single and split
* @flags: See enum idpf_queue_flags_t
* @idx: For TX queue, it is used as index to map between TX queue group and
* hot path TX pointers stored in vport. Used in both singleq/splitq.
* @desc_count: Number of descriptors
* @tx_min_pkt_len: Min supported packet length * @tx_min_pkt_len: Min supported packet length
* @num_completions: Only relevant for TX completion queue. It tracks the
* number of completions received to compare against the
* number of completions pending, as accumulated by the
* TX queues.
* @buf_stack: Stack of empty buffers to store buffer info for out of order
* buffer completions. See struct idpf_buf_lifo.
* @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_gen_s: Completion tag generation bit * @compl_tag_gen_s: Completion tag generation bit
* The format of the completion tag will change based on the TXQ * The format of the completion tag will change based on the TXQ
* descriptor ring size so that we can maintain roughly the same level * descriptor ring size so that we can maintain roughly the same level
@ -685,108 +653,238 @@ union idpf_queue_stats {
* -------------------------------- * --------------------------------
* *
* This gives us 8*8160 = 65280 possible unique values. * This gives us 8*8160 = 65280 possible unique values.
* @netdev: &net_device corresponding to this queue
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
* the TX completion queue, it can be for any TXQ associated
* with that completion queue. This means we can clean up to
* N TXQs during a single call to clean the completion queue.
* cleaned_bytes|pkts tracks the clean stats per TXQ during
* that single call to clean the completion queue. By doing so,
* we can update BQL with aggregate cleaned stats for each TXQ
* only once at the end of the cleaning routine.
* @clean_budget: singleq only, queue cleaning budget
* @cleaned_pkts: Number of packets cleaned for the above said case
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @stash: Tx buffer stash for Flow-based scheduling mode
* @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation * @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
* @sched_buf_hash: Hash table to stores buffers * @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_tx_queue_stats
* @q_id: Queue id
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
*/ */
struct idpf_queue { struct idpf_tx_queue {
struct device *dev; __cacheline_group_begin_aligned(read_mostly);
struct idpf_vport *vport;
union { union {
struct idpf_txq_group *txq_grp; struct idpf_base_tx_desc *base_tx;
struct idpf_rxq_group *rxq_grp; struct idpf_base_tx_ctx_desc *base_ctx;
union idpf_tx_flex_desc *flex_tx;
struct idpf_flex_tx_ctx_desc *flex_ctx;
void *desc_ring;
}; };
u16 idx;
void __iomem *tail;
union {
struct idpf_tx_buf *tx_buf; struct idpf_tx_buf *tx_buf;
struct { struct idpf_txq_group *txq_grp;
struct idpf_rx_buf *buf; struct device *dev;
dma_addr_t hdr_buf_pa; void __iomem *tail;
void *hdr_buf_va;
} rx_buf; DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
}; u16 idx;
struct page_pool *pp;
struct sk_buff *skb;
u16 q_type;
u32 q_id;
u16 desc_count; u16 desc_count;
u16 next_to_use; u16 tx_min_pkt_len;
u16 next_to_clean;
u16 next_to_alloc;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
union idpf_queue_stats q_stats;
struct u64_stats_sync stats_sync;
u32 cleaned_bytes;
u16 cleaned_pkts;
bool rx_hsplit_en;
u16 rx_hbuf_size;
u16 rx_buf_size;
u16 rx_max_pkt_size;
u16 rx_buf_stride;
u8 rx_buffer_low_watermark;
u64 rxdids;
struct idpf_q_vector *q_vector;
unsigned int size;
dma_addr_t dma;
void *desc_ring;
u16 tx_max_bufs;
u8 tx_min_pkt_len;
u32 num_completions;
struct idpf_buf_lifo buf_stack;
u16 compl_tag_bufid_m;
u16 compl_tag_gen_s; u16 compl_tag_gen_s;
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u16 next_to_use;
u16 next_to_clean;
union {
u32 cleaned_bytes;
u32 clean_budget;
};
u16 cleaned_pkts;
u16 tx_max_bufs;
struct idpf_txq_stash *stash;
u16 compl_tag_bufid_m;
u16 compl_tag_cur_gen; u16 compl_tag_cur_gen;
u16 compl_tag_gen_max; u16 compl_tag_gen_max;
DECLARE_HASHTABLE(sched_buf_hash, 12); struct u64_stats_sync stats_sync;
} ____cacheline_internodealigned_in_smp; struct idpf_tx_queue_stats q_stats;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
88 + sizeof(struct u64_stats_sync),
24);
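The completion-tag comment above splits a 16-bit tag into a buffer id below compl_tag_gen_s and a generation counter above it, which is where the 8 * 8160 = 65280 figure comes from. A worked model of that arithmetic; the shift derivation is illustrative, not lifted from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int ring_ids = 8160;	/* buffer ids per generation */
	unsigned int gen_s = 0;

	/* gen_s = number of bits needed to address ring_ids buffer ids */
	while ((1u << gen_s) < ring_ids)
		gen_s++;				/* -> 13 */

	const uint16_t bufid_m = (1u << gen_s) - 1;
	const unsigned int gens = 1u << (16 - gen_s);	/* -> 8 generations */

	uint16_t tag = (uint16_t)((5u << gen_s) | 1234);	/* gen 5, buffer 1234 */

	printf("gen_s=%u generations=%u unique=%u\n",
	       gen_s, gens, gens * ring_ids);		/* 13, 8, 65280 */
	printf("tag=0x%04x -> gen=%u bufid=%u\n",
	       (unsigned int)tag, (unsigned int)(tag >> gen_s),
	       (unsigned int)(tag & bufid_m));
	return 0;
}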
/**
* struct idpf_buf_queue - software structure representing a buffer queue
* @split_buf: buffer descriptor array
* @hdr_buf: &libeth_fqe for header buffers
* @hdr_pp: &page_pool for header buffers
* @buf: &libeth_fqe for data buffers
* @pp: &page_pool for data buffers
* @tail: Tail offset
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
* @hdr_truesize: truesize for buffer headers
* @truesize: truesize for data buffers
* @q_id: Queue id
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
* @rx_buffer_low_watermark: RX buffer low watermark
* @rx_hbuf_size: Header buffer size
* @rx_buf_size: Buffer size
*/
struct idpf_buf_queue {
__cacheline_group_begin_aligned(read_mostly);
struct virtchnl2_splitq_rx_buf_desc *split_buf;
struct libeth_fqe *hdr_buf;
struct page_pool *hdr_pp;
struct libeth_fqe *buf;
struct page_pool *pp;
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
u32 next_to_alloc;
u32 hdr_truesize;
u32 truesize;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
u16 rx_buffer_low_watermark;
u16 rx_hbuf_size;
u16 rx_buf_size;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
/**
* struct idpf_compl_queue - software structure representing a completion queue
* @comp: completion descriptor array
* @txq_grp: See struct idpf_txq_group
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
* @clean_budget: queue cleaning budget
* @netdev: &net_device corresponding to this queue
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @num_completions: Only relevant for TX completion queue. It tracks the
* number of completions received to compare against the
* number of completions pending, as accumulated by the
* TX queues.
* @q_id: Queue id
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
*/
struct idpf_compl_queue {
__cacheline_group_begin_aligned(read_mostly);
struct idpf_splitq_tx_compl_desc *comp;
struct idpf_txq_group *txq_grp;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
u32 clean_budget;
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
u32 num_completions;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
/** /**
* struct idpf_sw_queue * struct idpf_sw_queue
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: Buffer to allocate at
* @flags: See enum idpf_queue_flags_t
* @ring: Pointer to the ring * @ring: Pointer to the ring
* @flags: See enum idpf_queue_flags_t
* @desc_count: Descriptor count * @desc_count: Descriptor count
* @dev: Device back pointer for DMA mapping * @next_to_use: Buffer to allocate at
* @next_to_clean: Next descriptor to clean
* *
* Software queues are used in splitq mode to manage buffers between rxq * Software queues are used in splitq mode to manage buffers between rxq
* producer and the bufq consumer. These are required in order to maintain a * producer and the bufq consumer. These are required in order to maintain a
* lockless buffer management system and are strictly software only constructs. * lockless buffer management system and are strictly software only constructs.
*/ */
struct idpf_sw_queue { struct idpf_sw_queue {
u16 next_to_clean; __cacheline_group_begin_aligned(read_mostly);
u16 next_to_alloc; u32 *ring;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 *ring; u32 desc_count;
u16 desc_count; __cacheline_group_end_aligned(read_mostly);
struct device *dev;
} ____cacheline_internodealigned_in_smp; __cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
__cacheline_group_end_aligned(read_write);
};
libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
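The software refill queue is the lockless handshake the kernel-doc describes: the rxq producer writes "buffer id | generation bit" (see IDPF_RX_BI_BUFID_M / IDPF_RX_BI_GEN_M earlier in this header) and the bufq consumer only trusts entries whose generation matches its expectation, flipping that expectation on every wrap. A single-threaded sketch with illustrative ring size and helper names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_BUFID_M	0xFFFFu		/* GENMASK(15, 0) */
#define TOY_GEN_M	0x10000u	/* BIT(16) */
#define TOY_RING	8

struct toy_refillq {
	uint32_t ring[TOY_RING];
	uint32_t ntu, ntc;		/* next to use / next to clean */
	bool prod_gen, cons_gen;	/* producer / consumer generation */
};

static void toy_refill_push(struct toy_refillq *q, uint16_t bufid)
{
	q->ring[q->ntu] = bufid | (q->prod_gen ? TOY_GEN_M : 0);
	if (++q->ntu == TOY_RING) {
		q->ntu = 0;
		q->prod_gen = !q->prod_gen;	/* flip on wrap */
	}
}

static bool toy_refill_pop(struct toy_refillq *q, uint16_t *bufid)
{
	uint32_t val = q->ring[q->ntc];

	if (!!(val & TOY_GEN_M) != q->cons_gen)
		return false;		/* producer has not written it yet */

	*bufid = val & TOY_BUFID_M;
	if (++q->ntc == TOY_RING) {
		q->ntc = 0;
		q->cons_gen = !q->cons_gen;
	}
	return true;
}

int main(void)
{
	struct toy_refillq q = { .prod_gen = true, .cons_gen = true };
	uint16_t id;

	for (uint16_t i = 100; i < 103; i++)
		toy_refill_push(&q, i);

	while (toy_refill_pop(&q, &id))
		printf("bufq reclaims buffer id %u\n", id);
	return 0;
}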
/** /**
* struct idpf_rxq_set * struct idpf_rxq_set
* @rxq: RX queue * @rxq: RX queue
* @refillq0: Pointer to refill queue 0 * @refillq: pointers to refill queues
* @refillq1: Pointer to refill queue 1
* *
* Splitq only. idpf_rxq_set associates an rxq with an array of refillqs. * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
* Each rxq needs a refillq to return used buffers back to the respective bufq. * Each rxq needs a refillq to return used buffers back to the respective bufq.
* Bufqs then clean these refillqs for buffers to give to hardware. * Bufqs then clean these refillqs for buffers to give to hardware.
*/ */
struct idpf_rxq_set { struct idpf_rxq_set {
struct idpf_queue rxq; struct idpf_rx_queue rxq;
struct idpf_sw_queue *refillq0; struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
struct idpf_sw_queue *refillq1;
}; };
/** /**
@ -805,7 +903,7 @@ struct idpf_rxq_set {
* managed by at most two bufqs (depending on performance configuration). * managed by at most two bufqs (depending on performance configuration).
*/ */
struct idpf_bufq_set { struct idpf_bufq_set {
struct idpf_queue bufq; struct idpf_buf_queue bufq;
int num_refillqs; int num_refillqs;
struct idpf_sw_queue *refillqs; struct idpf_sw_queue *refillqs;
}; };
@ -831,7 +929,7 @@ struct idpf_rxq_group {
union { union {
struct { struct {
u16 num_rxq; u16 num_rxq;
struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q]; struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
} singleq; } singleq;
struct { struct {
u16 num_rxq_sets; u16 num_rxq_sets;
@ -846,6 +944,7 @@ struct idpf_rxq_group {
* @vport: Vport back pointer * @vport: Vport back pointer
* @num_txq: Number of TX queues associated * @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers * @txqs: Array of TX queue pointers
* @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only * @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the * @num_completions_pending: Total number of completions pending for the
* completion queue, accumulated for all TX queues * completion queue, accumulated for all TX queues
@ -859,13 +958,26 @@ struct idpf_txq_group {
struct idpf_vport *vport; struct idpf_vport *vport;
u16 num_txq; u16 num_txq;
struct idpf_queue *txqs[IDPF_LARGE_MAX_Q]; struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
struct idpf_txq_stash *stashes;
struct idpf_queue *complq; struct idpf_compl_queue *complq;
u32 num_completions_pending; u32 num_completions_pending;
}; };
static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
u32 cpu;
if (!q_vector)
return NUMA_NO_NODE;
cpu = cpumask_first(q_vector->affinity_mask);
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
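A hypothetical caller sketch (not part of this series) showing the intent of the helper: allocate queue software state on the memory node backing the vector's first affinity CPU, so hot-path data lands NUMA-local to the CPU that will poll it, with kzalloc_node() falling back gracefully on NUMA_NO_NODE:

static struct idpf_rx_queue *idpf_alloc_rxq_local(const struct idpf_q_vector *q_vector)
{
	/* assumes the usual driver headers; illustrative only */
	return kzalloc_node(sizeof(struct idpf_rx_queue), GFP_KERNEL,
			    idpf_q_vector_to_mem(q_vector));
}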
/** /**
* idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
* @size: transmit request size in bytes * @size: transmit request size in bytes
@ -921,60 +1033,6 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size); idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
} }
/**
* idpf_alloc_page - Allocate a new RX buffer from the page pool
* @pool: page_pool to allocate from
* @buf: metadata struct to populate with page info
* @buf_size: 2K or 4K
*
* Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
*/
static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
struct idpf_rx_buf *buf,
unsigned int buf_size)
{
if (buf_size == IDPF_RX_BUF_2048)
buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
buf_size);
else
buf->page = page_pool_dev_alloc_pages(pool);
if (!buf->page)
return DMA_MAPPING_ERROR;
buf->truesize = buf_size;
return page_pool_get_dma_addr(buf->page) + buf->page_offset +
pool->p.offset;
}
/**
* idpf_rx_put_page - Return RX buffer page to pool
* @rx_buf: RX buffer metadata struct
*/
static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
{
page_pool_put_page(rx_buf->page->pp, rx_buf->page,
rx_buf->truesize, true);
rx_buf->page = NULL;
}
/**
* idpf_rx_sync_for_cpu - Synchronize DMA buffer
* @rx_buf: RX buffer metadata struct
* @len: frame length from descriptor
*/
static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
{
struct page *page = rx_buf->page;
struct page_pool *pp = page->pp;
dma_sync_single_range_for_cpu(pp->p.dev,
page_pool_get_dma_addr(page),
rx_buf->page_offset + pp->p.offset, len,
page_pool_get_dma_dir(pp));
}
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport, void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg); struct virtchnl2_create_vport *vport_msg);
@ -991,35 +1049,27 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport); void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport); int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport); void idpf_vport_intr_ena(struct idpf_vport *vport);
enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
int idpf_config_rss(struct idpf_vport *vport); int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport); int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport); void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport); int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size); unsigned int size);
struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
struct idpf_rx_buf *rx_buf, void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
unsigned int size);
bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
bool xmit_more); bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size); unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb); netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 ring_idx); struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb); struct sk_buff *skb);
bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
unsigned int count);
int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct net_device *netdev); struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
struct net_device *netdev); bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
u16 cleaned_count); u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);


@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */ /* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
#include "idpf.h" #include "idpf.h"
#include "idpf_virtchnl.h" #include "idpf_virtchnl.h"
@ -750,7 +752,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
int i; int i;
for (i = 0; i < vport->num_txq; i++) for (i = 0; i < vport->num_txq; i++)
set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags); idpf_queue_set(SW_MARKER, vport->txqs[i]);
event = wait_event_timeout(vport->sw_marker_wq, event = wait_event_timeout(vport->sw_marker_wq,
test_and_clear_bit(IDPF_VPORT_SW_MARKER, test_and_clear_bit(IDPF_VPORT_SW_MARKER,
@ -758,7 +760,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
msecs_to_jiffies(500)); msecs_to_jiffies(500));
for (i = 0; i < vport->num_txq; i++) for (i = 0; i < vport->num_txq; i++)
clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); idpf_queue_clear(POLL_MODE, vport->txqs[i]);
if (event) if (event)
return 0; return 0;
@ -1092,7 +1094,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
int num_regs, u32 q_type) int num_regs, u32 q_type)
{ {
struct idpf_adapter *adapter = vport->adapter; struct idpf_adapter *adapter = vport->adapter;
struct idpf_queue *q;
int i, j, k = 0; int i, j, k = 0;
switch (q_type) { switch (q_type) {
@ -1111,6 +1112,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u16 num_rxq = rx_qgrp->singleq.num_rxq; u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) { for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
struct idpf_rx_queue *q;
q = rx_qgrp->singleq.rxqs[j]; q = rx_qgrp->singleq.rxqs[j];
q->tail = idpf_get_reg_addr(adapter, q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]); reg_vals[k]);
@ -1123,6 +1126,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u8 num_bufqs = vport->num_bufqs_per_qgrp; u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq; q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->tail = idpf_get_reg_addr(adapter, q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]); reg_vals[k]);
@ -1253,12 +1258,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
vport_msg->vport_index = cpu_to_le16(idx); vport_msg->vport_index = cpu_to_le16(idx);
if (adapter->req_tx_splitq) if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else else
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
if (adapter->req_rx_splitq) if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else else
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
@ -1320,10 +1325,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport_msg = adapter->vport_params_recvd[vport->idx]; vport_msg = adapter->vport_params_recvd[vport->idx];
if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
(vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
return -EOPNOTSUPP;
}
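The rejection above relies on IS_ENABLED() folding the config symbol to a compile-time 0 or 1, so the disabled path can be refused at runtime (and usually discarded by the compiler) without an #ifdef at the call site. A userspace model of that trick, with simplified stand-ins for the kconfig.h macros:

#include <stdio.h>

/* #define CONFIG_TOY_SINGLEQ 1 */	/* uncomment to "compile in" singleq */

#define __TOY_ARG_PLACEHOLDER_1 0,
#define __toy_take_second(ignored, val, ...) val
#define __toy_is_defined(x)	___toy_is_defined(x)
#define ___toy_is_defined(val)	____toy_is_defined(__TOY_ARG_PLACEHOLDER_##val)
#define ____toy_is_defined(arg)	__toy_take_second(arg 1, 0)
#define TOY_IS_ENABLED(option)	__toy_is_defined(option)

int main(void)
{
	int req_singleq = 1;		/* pretend the user asked for singleq */

	if (req_singleq && !TOY_IS_ENABLED(CONFIG_TOY_SINGLEQ)) {
		fprintf(stderr, "singleq requested but not compiled in\n");
		return 1;
	}
	printf("singleq path available\n");
	return 0;
}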
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { if (idpf_is_queue_model_split(vport->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
@ -1333,7 +1345,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport->base_rxd = true; vport->base_rxd = true;
} }
if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT) if (!idpf_is_queue_model_split(vport->txq_model))
return 0; return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@ -1449,19 +1461,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].model = qi[k].model =
cpu_to_le16(vport->txq_model); cpu_to_le16(vport->txq_model);
qi[k].type = qi[k].type =
cpu_to_le32(tx_qgrp->txqs[j]->q_type); cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi[k].ring_len = qi[k].ring_len =
cpu_to_le16(tx_qgrp->txqs[j]->desc_count); cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
qi[k].dma_ring_addr = qi[k].dma_ring_addr =
cpu_to_le64(tx_qgrp->txqs[j]->dma); cpu_to_le64(tx_qgrp->txqs[j]->dma);
if (idpf_is_queue_model_split(vport->txq_model)) { if (idpf_is_queue_model_split(vport->txq_model)) {
struct idpf_queue *q = tx_qgrp->txqs[j]; struct idpf_tx_queue *q = tx_qgrp->txqs[j];
qi[k].tx_compl_queue_id = qi[k].tx_compl_queue_id =
cpu_to_le16(tx_qgrp->complq->q_id); cpu_to_le16(tx_qgrp->complq->q_id);
qi[k].relative_queue_id = cpu_to_le16(j); qi[k].relative_queue_id = cpu_to_le16(j);
if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags)) if (idpf_queue_has(FLOW_SCH_EN, q))
qi[k].sched_mode = qi[k].sched_mode =
cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
else else
@ -1478,11 +1490,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qi[k].model = cpu_to_le16(vport->txq_model); qi[k].model = cpu_to_le16(vport->txq_model);
qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type); qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
else else
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
@ -1567,17 +1579,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto setup_rxqs; goto setup_rxqs;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
struct idpf_queue *bufq = struct idpf_buf_queue *bufq =
&rx_qgrp->splitq.bufq_sets[j].bufq; &rx_qgrp->splitq.bufq_sets[j].bufq;
qi[k].queue_id = cpu_to_le32(bufq->q_id); qi[k].queue_id = cpu_to_le32(bufq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model); qi[k].model = cpu_to_le16(vport->rxq_model);
qi[k].type = cpu_to_le32(bufq->q_type); qi[k].type =
cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi[k].ring_len = cpu_to_le16(bufq->desc_count); qi[k].ring_len = cpu_to_le16(bufq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
qi[k].buffer_notif_stride = bufq->rx_buf_stride; qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
qi[k].rx_buffer_low_watermark = qi[k].rx_buffer_low_watermark =
cpu_to_le16(bufq->rx_buffer_low_watermark); cpu_to_le16(bufq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
@ -1591,35 +1604,47 @@ setup_rxqs:
num_rxq = rx_qgrp->singleq.num_rxq; num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) { for (j = 0; j < num_rxq; j++, k++) {
struct idpf_queue *rxq; const struct idpf_bufq_set *sets;
struct idpf_rx_queue *rxq;
if (!idpf_is_queue_model_split(vport->rxq_model)) { if (!idpf_is_queue_model_split(vport->rxq_model)) {
rxq = rx_qgrp->singleq.rxqs[j]; rxq = rx_qgrp->singleq.rxqs[j];
goto common_qi_fields; goto common_qi_fields;
} }
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
qi[k].rx_bufq1_id = sets = rxq->bufq_sets;
cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
/* In splitq mode, RXQ buffer size should be
* set to that of the first buffer queue
* associated with this RXQ.
*/
rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi[k].bufq2_ena = IDPF_BUFQ2_ENA; qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
qi[k].rx_bufq2_id = qi[k].rx_bufq2_id =
cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); cpu_to_le16(sets[1].bufq.q_id);
} }
qi[k].rx_buffer_low_watermark = qi[k].rx_buffer_low_watermark =
cpu_to_le16(rxq->rx_buffer_low_watermark); cpu_to_le16(rxq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
common_qi_fields: rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
if (rxq->rx_hsplit_en) {
if (idpf_queue_has(HSPLIT_EN, rxq)) {
qi[k].qflags |= qi[k].qflags |=
cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
qi[k].hdr_buffer_size = qi[k].hdr_buffer_size =
cpu_to_le16(rxq->rx_hbuf_size); cpu_to_le16(rxq->rx_hbuf_size);
} }
common_qi_fields:
qi[k].queue_id = cpu_to_le32(rxq->q_id); qi[k].queue_id = cpu_to_le32(rxq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model); qi[k].model = cpu_to_le16(vport->rxq_model);
qi[k].type = cpu_to_le32(rxq->q_type); qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi[k].ring_len = cpu_to_le16(rxq->desc_count); qi[k].ring_len = cpu_to_le16(rxq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
@ -1706,7 +1731,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) { for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
} }
@ -1720,7 +1745,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
for (i = 0; i < vport->num_txq_grp; i++, k++) { for (i = 0; i < vport->num_txq_grp; i++, k++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
} }
@ -1741,12 +1766,12 @@ setup_rx:
qc[k].start_queue_id = qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
qc[k].type = qc[k].type =
cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
} else { } else {
qc[k].start_queue_id = qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
qc[k].type = qc[k].type =
cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
} }
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
} }
@ -1761,10 +1786,11 @@ setup_rx:
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
struct idpf_queue *q; const struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq; q = &rx_qgrp->splitq.bufq_sets[j].bufq;
qc[k].type = cpu_to_le32(q->q_type); qc[k].type =
cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qc[k].start_queue_id = cpu_to_le32(q->q_id); qc[k].start_queue_id = cpu_to_le32(q->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
} }
@ -1849,7 +1875,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) { for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); vqv[k].queue_type =
cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
if (idpf_is_queue_model_split(vport->txq_model)) { if (idpf_is_queue_model_split(vport->txq_model)) {
@ -1879,14 +1906,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
num_rxq = rx_qgrp->singleq.num_rxq; num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) { for (j = 0; j < num_rxq; j++, k++) {
struct idpf_queue *rxq; struct idpf_rx_queue *rxq;
if (idpf_is_queue_model_split(vport->rxq_model)) if (idpf_is_queue_model_split(vport->rxq_model))
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else else
rxq = rx_qgrp->singleq.rxqs[j]; rxq = rx_qgrp->singleq.rxqs[j];
vqv[k].queue_type = cpu_to_le32(rxq->q_type); vqv[k].queue_type =
cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
vqv[k].queue_id = cpu_to_le32(rxq->q_id); vqv[k].queue_id = cpu_to_le32(rxq->q_id);
vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
@ -1975,7 +2003,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* queues virtchnl message is sent * queues virtchnl message is sent
*/ */
for (i = 0; i < vport->num_txq; i++) for (i = 0; i < vport->num_txq; i++)
set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); idpf_queue_set(POLL_MODE, vport->txqs[i]);
/* schedule the napi to receive all the marker packets */ /* schedule the napi to receive all the marker packets */
local_bh_disable(); local_bh_disable();
@@ -2469,39 +2497,52 @@ do_memcpy:
 * @frag: fragmentation allowed
 *
 */
-static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
 struct idpf_ptype_state *pstate,
 bool ipv4, bool frag)
 {
 if (!pstate->outer_ip || !pstate->outer_frag) {
-ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
 pstate->outer_ip = true;
 if (ipv4)
-ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
 else
-ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
 if (frag) {
-ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ptype->outer_frag = LIBETH_RX_PT_FRAG;
 pstate->outer_frag = true;
 }
 } else {
-ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
 if (ipv4)
-ptype->tunnel_end_prot =
-IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
 else
-ptype->tunnel_end_prot =
-IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
 if (frag)
-ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
 }
 }
+
+static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
+{
+if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ptype->inner_prot)
+ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
+else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ptype->outer_ip)
+ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
+else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
+ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+else
+ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
+
+libeth_rx_pt_gen_hash_type(ptype);
+}
+
 /**
 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
 * @vport: virtual port data structure
@@ -2512,7 +2553,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 {
 struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
 struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
-struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
 int max_ptype, ptypes_recvd = 0, ptype_offset;
 struct idpf_adapter *adapter = vport->adapter;
 struct idpf_vc_xn_params xn_params = {};
@@ -2520,12 +2561,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 ssize_t reply_sz;
 int i, j, k;
+
+if (vport->rx_ptype_lkup)
+return 0;
+
 if (idpf_is_queue_model_split(vport->rxq_model))
 max_ptype = IDPF_RX_MAX_PTYPE;
 else
 max_ptype = IDPF_RX_MAX_BASE_PTYPE;
-memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
+if (!ptype_lkup)
+return -ENOMEM;
 get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
 if (!get_ptype_info)
@@ -2583,16 +2629,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 /* 0xFFFF indicates end of ptypes */
 if (le16_to_cpu(ptype->ptype_id_10) ==
 IDPF_INVALID_PTYPE_ID)
-return 0;
+goto out;
 if (idpf_is_queue_model_split(vport->rxq_model))
 k = le16_to_cpu(ptype->ptype_id_10);
 else
 k = ptype->ptype_id_8;
-if (ptype->proto_id_count)
-ptype_lkup[k].known = 1;
 for (j = 0; j < ptype->proto_id_count; j++) {
 id = le16_to_cpu(ptype->proto_id[j]);
 switch (id) {
@@ -2600,18 +2643,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 if (pstate.tunnel_state ==
 IDPF_PTYPE_TUNNEL_IP) {
 ptype_lkup[k].tunnel_type =
-IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+LIBETH_RX_PT_TUNNEL_IP_GRENAT;
 pstate.tunnel_state |=
 IDPF_PTYPE_TUNNEL_IP_GRENAT;
 }
 break;
 case VIRTCHNL2_PROTO_HDR_MAC:
 ptype_lkup[k].outer_ip =
-IDPF_RX_PTYPE_OUTER_L2;
+LIBETH_RX_PT_OUTER_L2;
 if (pstate.tunnel_state ==
 IDPF_TUN_IP_GRE) {
 ptype_lkup[k].tunnel_type =
-IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
 pstate.tunnel_state |=
 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
 }
@@ -2638,23 +2681,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 break;
 case VIRTCHNL2_PROTO_HDR_UDP:
 ptype_lkup[k].inner_prot =
-IDPF_RX_PTYPE_INNER_PROT_UDP;
+LIBETH_RX_PT_INNER_UDP;
 break;
 case VIRTCHNL2_PROTO_HDR_TCP:
 ptype_lkup[k].inner_prot =
-IDPF_RX_PTYPE_INNER_PROT_TCP;
+LIBETH_RX_PT_INNER_TCP;
 break;
 case VIRTCHNL2_PROTO_HDR_SCTP:
 ptype_lkup[k].inner_prot =
-IDPF_RX_PTYPE_INNER_PROT_SCTP;
+LIBETH_RX_PT_INNER_SCTP;
 break;
 case VIRTCHNL2_PROTO_HDR_ICMP:
 ptype_lkup[k].inner_prot =
-IDPF_RX_PTYPE_INNER_PROT_ICMP;
+LIBETH_RX_PT_INNER_ICMP;
 break;
 case VIRTCHNL2_PROTO_HDR_PAY:
 ptype_lkup[k].payload_layer =
-IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+LIBETH_RX_PT_PAYLOAD_L2;
 break;
 case VIRTCHNL2_PROTO_HDR_ICMPV6:
 case VIRTCHNL2_PROTO_HDR_IPV6_EH:
@@ -2708,9 +2751,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 break;
 }
 }
+
+idpf_finalize_ptype_lookup(&ptype_lkup[k]);
 }
 }
+
+out:
+vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+
 return 0;
 }
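
The conversion above leans on the scope-based cleanup helpers from <linux/cleanup.h>: __free(kfree) releases ptype_lkup automatically on every early return, while no_free_ptr() disarms that cleanup once the table is handed over to the vport. A minimal self-contained sketch of the same pattern, with made-up names:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_ctx {
        u32 *table;                             /* hypothetical owner of the result */
};

int example_fill(u32 *tbl, u32 entries);        /* hypothetical helper */

/* Illustrative only: mirrors the allocate/fill/transfer-ownership flow. */
static int example_build_table(struct example_ctx *ctx, u32 entries)
{
        u32 *tbl __free(kfree) = kcalloc(entries, sizeof(*tbl), GFP_KERNEL);

        if (!tbl)
                return -ENOMEM;

        if (example_fill(tbl, entries))
                return -EIO;                    /* tbl is kfree()d automatically */

        ctx->table = no_free_ptr(tbl);          /* ownership moved, auto-free disarmed */

        return 0;
}
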
@@ -3125,7 +3173,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
-vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
 /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
@@ -3242,7 +3290,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
 int num_qids,
 u32 q_type)
 {
-struct idpf_queue *q;
 int i, j, k = 0;
 switch (q_type) {
@@ -3250,11 +3297,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
 for (i = 0; i < vport->num_txq_grp; i++) {
 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
-for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
 tx_qgrp->txqs[j]->q_id = qids[k];
-tx_qgrp->txqs[j]->q_type =
-VIRTCHNL2_QUEUE_TYPE_TX;
-}
 }
 break;
 case VIRTCHNL2_QUEUE_TYPE_RX:
@@ -3268,12 +3312,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
 num_rxq = rx_qgrp->singleq.num_rxq;
 for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+struct idpf_rx_queue *q;
+
 if (idpf_is_queue_model_split(vport->rxq_model))
 q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
 else
 q = rx_qgrp->singleq.rxqs[j];
 q->q_id = qids[k];
-q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
 }
 }
 break;
@@ -3282,8 +3327,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
 tx_qgrp->complq->q_id = qids[k];
-tx_qgrp->complq->q_type =
-VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
 }
 break;
 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
@@ -3292,9 +3335,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
 u8 num_bufqs = vport->num_bufqs_per_qgrp;
 for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+struct idpf_buf_queue *q;
+
 q = &rx_qgrp->splitq.bufq_sets[j].bufq;
 q->q_id = qids[k];
-q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
 }
 }
 break;

drivers/net/ethernet/intel/libeth/rx.c View File

@@ -6,7 +6,7 @@
 /* Rx buffer management */
 /**
-* libeth_rx_hw_len - get the actual buffer size to be passed to HW
+* libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
 * @pp: &page_pool_params of the netdev to calculate the size for
 * @max_len: maximum buffer size for a single descriptor
 *
@@ -14,7 +14,7 @@
 * MTU the @dev has, HW required alignment, minimum and maximum allowed values,
 * and system's page size.
 */
-static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
+static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
 {
 u32 len;
@@ -26,6 +26,118 @@ static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
 return len;
 }
+
+/**
+* libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
+* @pp: &page_pool_params of the netdev to calculate the size for
+* @max_len: maximum buffer size for a single descriptor
+* @truesize: desired truesize for the buffers
+*
+* Return: HW-writeable length per one buffer to pass it to the HW ignoring the
+* MTU and closest to the passed truesize. Can be used for "short" buffer
+* queues to fragment pages more efficiently.
+*/
+static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
+u32 max_len, u32 truesize)
+{
+u32 min, len;
+
+min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE);
+truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min),
+PAGE_SIZE << LIBETH_RX_PAGE_ORDER);
+
+len = SKB_WITH_OVERHEAD(truesize - pp->offset);
+len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
+len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+pp->max_len);
+
+return len;
+}
+
+/**
+* libeth_rx_page_pool_params - calculate params with the stack overhead
+* @fq: buffer queue to calculate the size for
+* @pp: &page_pool_params of the netdev
+*
+* Set the PP params to fit all needed stack overhead (headroom, tailroom) and
+* both the HW buffer length and the truesize for all types of buffers. For
+* "short" buffers, truesize never exceeds the "wanted" one; for the rest,
+* it can be up to the page size.
+*
+* Return: true on success, false on invalid input params.
+*/
+static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
+struct page_pool_params *pp)
+{
+pp->offset = LIBETH_SKB_HEADROOM;
+/* HW-writeable / syncable length per one page */
+pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
+
+/* HW-writeable length per buffer */
+switch (fq->type) {
+case LIBETH_FQE_MTU:
+fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
+break;
+case LIBETH_FQE_SHORT:
+fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
+fq->truesize);
+break;
+case LIBETH_FQE_HDR:
+fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
+break;
+default:
+return false;
+}
+
+/* Buffer size to allocate */
+fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
+fq->buf_len));
+
+return true;
+}
+
+/**
+* libeth_rx_page_pool_params_zc - calculate params without the stack overhead
+* @fq: buffer queue to calculate the size for
+* @pp: &page_pool_params of the netdev
+*
+* Set the PP params to exclude the stack overhead and set both the buffer
+* length and the truesize, which are equal for the data buffers. Note that
+* this requires separate header buffers to always be active and account for
+* the overhead.
+* With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy
+* mode.
+*
+* Return: true on success, false on invalid input params.
+*/
+static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
+struct page_pool_params *pp)
+{
+u32 mtu, max;
+
+pp->offset = 0;
+pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;
+
+switch (fq->type) {
+case LIBETH_FQE_MTU:
+mtu = READ_ONCE(pp->netdev->mtu);
+break;
+case LIBETH_FQE_SHORT:
+mtu = fq->truesize;
+break;
+default:
+return false;
+}
+
+mtu = roundup_pow_of_two(mtu);
+max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
+pp->max_len);
+
+fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
+fq->truesize = fq->buf_len;
+
+return true;
+}
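
A quick worked example of the LIBETH_FQE_SHORT sizing above, under assumed but typical values (4 KiB pages, a 128-byte LIBETH_RX_BUF_STRIDE, a 64-byte pp->offset and roughly 320 bytes of skb_shared_info overhead on x86_64); the numbers are illustrative, not taken from the patch:

/*
 * Wanted truesize = 2048:
 *
 *   min      = SKB_HEAD_ALIGN(64 + 128)                   = 192 + 320 = 512
 *   truesize = clamp(roundup_pow_of_two(2048), 512, 4096)             = 2048
 *   len      = SKB_WITH_OVERHEAD(2048 - 64)               = 1984 - 320 = 1664
 *   len      = ALIGN_DOWN(1664, 128)                                  = 1664
 *
 * so each short buffer occupies a 2048-byte page fragment of which the HW may
 * write up to 1664 bytes.
 */
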
 /**
 * libeth_rx_fq_create - create a PP with the default libeth settings
 * @fq: buffer queue struct to fill
@@ -44,19 +156,17 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
 .netdev = napi->dev,
 .napi = napi,
 .dma_dir = DMA_FROM_DEVICE,
-.offset = LIBETH_SKB_HEADROOM,
 };
 struct libeth_fqe *fqes;
 struct page_pool *pool;
+bool ret;
 
-/* HW-writeable / syncable length per one page */
-pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);
-
-/* HW-writeable length per buffer */
-fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
-/* Buffer size to allocate */
-fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
-fq->buf_len));
+if (!fq->hsplit)
+ret = libeth_rx_page_pool_params(fq, &pp);
+else
+ret = libeth_rx_page_pool_params_zc(fq, &pp);
+if (!ret)
+return -EINVAL;
 
 pool = page_pool_create(&pp);
 if (IS_ERR(pool))
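
For orientation, a sketch of how a driver might request a header/payload fill-queue pair with the new fields once header split is enabled; the ring size, variable names and NUMA hint are illustrative assumptions rather than code from this series. The header queue takes the regular params path, while the payload queue takes the overhead-free one:

        /* Assumed context: 'napi' is the queue vector's &struct napi_struct. */
        struct libeth_fq hdr_fq = {
                .count  = 512,                  /* assumed ring size */
                .type   = LIBETH_FQE_HDR,
                .nid    = NUMA_NO_NODE,
        };
        struct libeth_fq pl_fq = {
                .count  = 512,
                .type   = LIBETH_FQE_MTU,
                .hsplit = true,                 /* exclude the stack overhead */
                .nid    = NUMA_NO_NODE,
        };
        int err;

        err = libeth_rx_fq_create(&hdr_fq, napi);
        if (!err)
                err = libeth_rx_fq_create(&pl_fq, napi);

libeth_rx_fq_create() then derives buf_len and truesize for each queue via the helpers shown above.
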

include/linux/cache.h View File

@@ -13,6 +13,32 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
+
+/**
+* SMP_CACHE_ALIGN - align a value to the L2 cacheline size
+* @x: value to align
+*
+* On some architectures, L2 ("SMP") CL size is bigger than L1, and sometimes,
+* this needs to be accounted for.
+*
+* Return: aligned value.
+*/
+#ifndef SMP_CACHE_ALIGN
+#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES)
+#endif
+
+/*
+* ``__aligned_largest`` aligns a field to the value most optimal for the
+* target architecture to perform memory operations. Get the actual value
+* to be able to use it anywhere else.
+*/
+#ifndef __LARGEST_ALIGN
+#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest)
+#endif
+
+#ifndef LARGEST_ALIGN
+#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN)
+#endif
+
 /*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is used
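
A quick illustration of the two new helpers, assuming SMP_CACHE_BYTES == 64 and an architecture where __aligned_largest yields 16-byte alignment (e.g. x86_64); other configurations will produce different values:

/*
 *   SMP_CACHE_ALIGN(90)  ->  ALIGN(90, 64)                                -> 128
 *   __LARGEST_ALIGN      ->  sizeof(struct { long x; } __aligned_largest) ->  16
 *   LARGEST_ALIGN(20)    ->  ALIGN(20, 16)                                ->  32
 */
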
@@ -95,6 +121,39 @@
 __u8 __cacheline_group_end__##GROUP[0]
 #endif
+
+/**
+* __cacheline_group_begin_aligned - declare an aligned group start
+* @GROUP: name of the group
+* @...: optional group alignment
+*
+* The following block inside a struct:
+*
+* __cacheline_group_begin_aligned(grp);
+* field a;
+* field b;
+* __cacheline_group_end_aligned(grp);
+*
+* will always be aligned to either the specified alignment or
+* ``SMP_CACHE_BYTES``.
+*/
+#define __cacheline_group_begin_aligned(GROUP, ...) \
+__cacheline_group_begin(GROUP) \
+__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
+/**
+* __cacheline_group_end_aligned - declare an aligned group end
+* @GROUP: name of the group
+* @...: optional alignment (same as was in __cacheline_group_begin_aligned())
+*
+* Note that the end marker is aligned to sizeof(long) to allow more precise
+* size assertion. It also declares padding at the end to avoid the next field
+* falling into this cacheline.
+*/
+#define __cacheline_group_end_aligned(GROUP, ...) \
+__cacheline_group_end(GROUP) __aligned(sizeof(long)); \
+struct { } __cacheline_group_pad__##GROUP \
+__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
 #ifndef CACHELINE_ASSERT_GROUP_MEMBER
 #define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
 BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
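
One detail worth spelling out: the alignment argument is optional. With no argument, (__VA_ARGS__ + 0) evaluates to 0 and the GNU ?: fallback picks SMP_CACHE_BYTES; an explicit value overrides it. A tiny hypothetical use of both forms:

struct example_hot_cold {
        __cacheline_group_begin_aligned(hot);           /* SMP_CACHE_BYTES */
        u64 counter;
        __cacheline_group_end_aligned(hot);

        __cacheline_group_begin_aligned(cfg, 32);       /* explicit 32 bytes */
        u32 mtu;
        u32 flags;
        __cacheline_group_end_aligned(cfg, 32);
};
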

include/net/libeth/cache.h View File

@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_CACHE_H
+#define __LIBETH_CACHE_H
+
+#include <linux/cache.h>
+
+/**
+* libeth_cacheline_group_assert - make sure cacheline group size is expected
+* @type: type of the structure containing the group
+* @grp: group name inside the struct
+* @sz: expected group size
+*/
+#if defined(CONFIG_64BIT) && SMP_CACHE_BYTES == 64
+#define libeth_cacheline_group_assert(type, grp, sz) \
+static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+offsetofend(type, __cacheline_group_begin__##grp) == \
+(sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+static_assert(sizeof(type) == (sz))
+#else /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+#define libeth_cacheline_group_assert(type, grp, sz) \
+static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+offsetofend(type, __cacheline_group_begin__##grp) <= \
+(sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+static_assert(sizeof(type) <= (sz))
+#endif /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+
+#define __libeth_cls1(sz1) SMP_CACHE_ALIGN(sz1)
+#define __libeth_cls2(sz1, sz2) (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2))
+#define __libeth_cls3(sz1, sz2, sz3) \
+(SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2) + SMP_CACHE_ALIGN(sz3))
+#define __libeth_cls(...) \
+CONCATENATE(__libeth_cls, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+/**
+* libeth_cacheline_struct_assert - make sure CL-based struct size is expected
+* @type: type of the struct
+* @...: from 1 to 3 CL group sizes (read-mostly, read-write, cold)
+*
+* When a struct contains several CL groups, it's difficult to predict its size
+* on different architectures. The macro instead takes sizes of all of the
+* groups the structure contains and generates the final struct size.
+*/
+#define libeth_cacheline_struct_assert(type, ...) \
+__libeth_cacheline_struct_assert(type, __libeth_cls(__VA_ARGS__)); \
+static_assert(__alignof(type) >= SMP_CACHE_BYTES)
+
+/**
+* libeth_cacheline_set_assert - make sure CL-based struct layout is expected
+* @type: type of the struct
+* @ro: expected size of the read-mostly group
+* @rw: expected size of the read-write group
+* @c: expected size of the cold group
+*
+* Check that each group size is expected and then do final struct size check.
+*/
+#define libeth_cacheline_set_assert(type, ro, rw, c) \
+libeth_cacheline_group_assert(type, read_mostly, ro); \
+libeth_cacheline_group_assert(type, read_write, rw); \
+libeth_cacheline_group_assert(type, cold, c); \
+libeth_cacheline_struct_assert(type, ro, rw, c)
+
+#endif /* __LIBETH_CACHE_H */
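
To make the intended usage concrete, below is a small hypothetical structure laid out with the cacheline-group helpers and checked with libeth_cacheline_set_assert(). The member names and the 16/8/8 group sizes (which hold on a 64-bit build with 64-byte cachelines) are illustrative only; on other configurations the asserts degrade to <= checks, so the same expected sizes still build:

struct example_queue {
        __cacheline_group_begin_aligned(read_mostly);
        void *desc_ring;                /* 8 bytes           */
        u32 desc_count;                 /* 4 bytes           */
        u32 rx_buf_size;                /* 4 bytes, 16 total */
        __cacheline_group_end_aligned(read_mostly);

        __cacheline_group_begin_aligned(read_write);
        u32 next_to_use;                /* 4 bytes           */
        u32 next_to_clean;              /* 4 bytes, 8 total  */
        __cacheline_group_end_aligned(read_write);

        __cacheline_group_begin_aligned(cold);
        u32 q_id;                       /* 4 bytes           */
        u32 idx;                        /* 4 bytes, 8 total  */
        __cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct example_queue, 16, 8, 8);
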

include/net/libeth/rx.h View File

@@ -17,6 +17,8 @@
 #define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
 /* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
 #define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
+/* Maximum supported L2-L4 header length */
+#define LIBETH_MAX_HEAD roundup_pow_of_two(max(MAX_HEADER, 256))
 /* Always use order-0 pages */
 #define LIBETH_RX_PAGE_ORDER 0
@@ -43,6 +45,18 @@ struct libeth_fqe {
 u32 truesize;
 } __aligned_largest;
+
+/**
+* enum libeth_fqe_type - enum representing types of Rx buffers
+* @LIBETH_FQE_MTU: buffer size is determined by MTU
+* @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames
+* @LIBETH_FQE_HDR: buffer size is ``LIBETH_MAX_HEAD``-sized, for headers
+*/
+enum libeth_fqe_type {
+LIBETH_FQE_MTU = 0U,
+LIBETH_FQE_SHORT,
+LIBETH_FQE_HDR,
+};
+
 /**
 * struct libeth_fq - structure representing a buffer (fill) queue
 * @fp: hotpath part of the structure
@@ -50,6 +64,8 @@ struct libeth_fqe {
 * @fqes: array of Rx buffers
 * @truesize: size to allocate per buffer, w/overhead
 * @count: number of descriptors/buffers the queue has
+* @type: type of the buffers this queue has
+* @hsplit: flag whether header split is enabled
 * @buf_len: HW-writeable length per each buffer
 * @nid: ID of the closest NUMA node with memory
 */
@@ -63,6 +79,9 @@ struct libeth_fq {
 );
 /* Cold fields */
+enum libeth_fqe_type type:2;
+bool hsplit:1;
+
 u32 buf_len;
 int nid;
 };

include/net/page_pool/types.h View File

@@ -129,6 +129,16 @@ struct page_pool_stats {
 };
 #endif
+
+/* The whole frag API block must stay within one cacheline. On 32-bit systems,
+ * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``.
+ * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``.
+ * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that
+ * one for simplicity.
+ * Having it aligned to a cacheline boundary may be excessive and doesn't bring
+ * any benefit.
+ */
+#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))
+
 struct page_pool {
 struct page_pool_params_fast p;
@@ -142,19 +152,11 @@ struct page_pool {
 bool system:1; /* This is a global percpu pool */
 #endif
-/* The following block must stay within one cacheline. On 32-bit
- * systems, sizeof(long) == sizeof(int), so that the block size is
- * ``3 * sizeof(long)``. On 64-bit systems, the actual size is
- * ``2 * sizeof(long) + sizeof(int)``. The closest pow-2 to both of
- * them is ``4 * sizeof(long)``, so just use that one for simplicity.
- * Having it aligned to a cacheline boundary may be excessive and
- * doesn't bring any good.
- */
-__cacheline_group_begin(frag) __aligned(4 * sizeof(long));
+__cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
 long frag_users;
 netmem_ref frag_page;
 unsigned int frag_offset;
-__cacheline_group_end(frag);
+__cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
 struct delayed_work release_dw;
 void (*disconnect)(void *pool);
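
The arithmetic behind the new constant: the frag fields are a long, a netmem_ref (an unsigned long under the hood) and an unsigned int, so the group payload is 2 * sizeof(long) + sizeof(int), i.e. 20 bytes on 64-bit and 12 on 32-bit, both of which fit the 4 * sizeof(long) block (32 and 16 bytes respectively). A generic sanity check of that reasoning (not part of the patch):

/* Holds on any ABI where sizeof(int) <= 2 * sizeof(long). */
static_assert(PAGE_POOL_FRAG_GROUP_ALIGN >=
              2 * sizeof(long) + sizeof(unsigned int));
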

net/core/page_pool.c View File

@@ -178,7 +178,8 @@ static void page_pool_struct_check(void)
 CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
 CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
 CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
-CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag, 4 * sizeof(long));
+CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
+PAGE_POOL_FRAG_GROUP_ALIGN);
 }
 static int page_pool_init(struct page_pool *pool,