Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue
Tony Nguyen says:

====================
ice: xsk: reduced queue count fixes

Maciej Fijalkowski says:

This small series is supposed to fix the issues around AF_XDP usage
with reduced queue count on an interface. Due to the XDP rings setup,
some configurations can result in sockets not seeing traffic flowing.
More about this in the description of patch 2.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: xsk: use Rx ring's XDP ring when picking NAPI context
  ice: xsk: prohibit usage of non-balanced queue id
====================

Link: https://lore.kernel.org/r/20220822163257.2382487-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
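As background for the diff below, a minimal standalone sketch of the queue mapping the series is concerned with; the ring counts are hypothetical and the arithmetic only mirrors the reasoning spelled out in the new ice_tx_xsk_pool() kernel-doc, it is not driver code. When the queue count is reduced, one queue vector can carry several XDP rings, so indexing vsi->xdp_rings[] by the socket's queue id can land on a ring serviced by a different NAPI context than that queue's Rx ring; hence both the XSK pool assignment and the NAPI kick now go through rx_ring->xdp_ring.

/*
 * Sketch only (hypothetical ring counts, not driver code): with 8 XDP rings
 * (one per CPU) but the netdev reduced to 2 combined queues, each queue
 * vector carries 4 XDP rings.  The XDP ring that shares a NAPI context with
 * Rx queue 1 is xdp_rings[4], while xdp_rings[1] belongs to queue vector 0.
 */
#include <stdio.h>

#define NUM_XDP_RINGS	8	/* one XDP ring per CPU */
#define NUM_QVECTORS	2	/* combined queue count after reduction */

int main(void)
{
	int rings_per_vector = NUM_XDP_RINGS / NUM_QVECTORS;
	int qid;

	for (qid = 0; qid < NUM_QVECTORS; qid++) {
		/* first XDP ring carried by the same queue vector as Rx queue qid */
		int xdp_on_rx_vector = qid * rings_per_vector;
		/* queue vector that xdp_rings[qid] is actually attached to */
		int vector_of_xdp_qid = qid / rings_per_vector;

		printf("qid %d: rx_ring->xdp_ring -> xdp_rings[%d] (vector %d); xdp_rings[%d] -> vector %d\n",
		       qid, xdp_on_rx_vector, qid, qid, vector_of_xdp_qid);
	}
	return 0;
}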
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
  *
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 {
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 }
 
 /**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
  *
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets XSK buff pool pointer on XDP ring.
+ *
+ * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
+ * queue id. Reason for doing so is that queue vectors might have assigned more
+ * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
+ * carries a pointer to one of these XDP rings for its own purposes, such as
+ * handling XDP_TX action, therefore we can piggyback here on the
+ * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
  */
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 {
-	struct ice_vsi *vsi = ring->vsi;
-	u16 qid;
+	struct ice_tx_ring *ring;
 
-	qid = ring->q_index - vsi->alloc_txq;
+	ring = vsi->rx_rings[qid]->xdp_ring;
+	if (!ring)
+		return;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-		return NULL;
+	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+		ring->xsk_pool = NULL;
+		return;
+	}
 
-	return xsk_get_pool_from_qid(vsi->netdev, qid);
+	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
 }
 
 /**
@@ -1986,8 +1986,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
 	if (ret)
 		return ret;
 
-	ice_for_each_xdp_txq(vsi, i)
-		vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+	ice_for_each_rxq(vsi, i)
+		ice_tx_xsk_pool(vsi, i);
 
 	return ret;
 }
@@ -2581,7 +2581,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 		if (ice_setup_tx_ring(xdp_ring))
 			goto free_xdp_rings;
 		ice_set_ring_xdp(xdp_ring);
-		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
 		spin_lock_init(&xdp_ring->tx_lock);
 		for (j = 0; j < xdp_ring->count; j++) {
 			tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2589,13 +2588,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 		}
 	}
 
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key))
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		else
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
-	}
-
 	return 0;
 
 free_xdp_rings:
@@ -2685,6 +2677,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 		xdp_rings_rem -= xdp_rings_per_v;
 	}
 
+	ice_for_each_rxq(vsi, i) {
+		if (static_key_enabled(&ice_xdp_locking_key)) {
+			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+		} else {
+			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+			struct ice_tx_ring *ring;
+
+			ice_for_each_tx_ring(ring, q_vector->tx) {
+				if (ice_ring_is_xdp(ring)) {
+					vsi->rx_rings[i]->xdp_ring = ring;
+					break;
+				}
+			}
+		}
+		ice_tx_xsk_pool(vsi, i);
+	}
+
 	/* omit the scheduler update if in reset path; XDP queues will be
 	 * taken into account at the end of ice_vsi_rebuild, where
 	 * ice_cfg_vsi_lan is being called
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 		if (err)
 			goto free_buf;
 		ice_set_ring_xdp(xdp_ring);
-		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+		ice_tx_xsk_pool(vsi, q_idx);
 	}
 
 	err = ice_vsi_cfg_rxq(rx_ring);
@@ -329,6 +329,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	bool if_running, pool_present = !!pool;
 	int ret = 0, pool_failure = 0;
 
+	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+		pool_failure = -EINVAL;
+		goto failure;
+	}
+
 	if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
 	    !is_power_of_2(vsi->tx_rings[qid]->count)) {
 		netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
@@ -353,7 +359,7 @@ xsk_pool_if_up:
 	if (if_running) {
 		ret = ice_qp_ena(vsi, qid);
 		if (!ret && pool_present)
-			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
 		else if (ret)
 			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
 	}
@@ -944,13 +950,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
 	if (!ice_is_xdp_ena_vsi(vsi))
 		return -EINVAL;
 
-	if (queue_id >= vsi->num_txq)
+	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
 		return -EINVAL;
 
-	if (!vsi->xdp_rings[queue_id]->xsk_pool)
-		return -EINVAL;
+	ring = vsi->rx_rings[queue_id]->xdp_ring;
 
-	ring = vsi->xdp_rings[queue_id];
+	if (!ring->xsk_pool)
+		return -EINVAL;
 
 	/* The idea here is that if NAPI is running, mark a miss, so
 	 * it will run again. If not, trigger an interrupt and