mlx5-fixes-2020-04-20

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl6eFG4ACgkQSD+KveBX
+j74aQf+I5hugtd5yrahTRySKfDF9wVfP1fo3yj8qR3qfTpKDA1l0VjRA0rTwMB8
llwiTuVMJSA/CWYPDgCOSWI8k00Mm6fC1PSeDoKrjF4AQiBYEiLJUc562Wekk4+E
eLUn26h1YM5dbuA4G1dJEmLqKgEfzcczgFlJKXcCwCZoGVPgjr1dPogXsUtLphD2
NfTIXIcMyf+way7gn5eLR4Y/V39HlTxktI5ijeDVH68RhwGgAwe/OuDmlB0APNf3
jdOl17VRzqd4Mq8zD20gHPK7klJQ3yeQfPJgF95uV6B+mULt6+y6WQENEZdzAhtw
SCfegXoK8kd1nnJgMR5TwJ5SzddOnw==
=CooV
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2020-04-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-fixes-2020-04-20

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a460fc5d4c
@@ -7,10 +7,10 @@ config MLX5_CORE
 	tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
 	depends on PCI
 	select NET_DEVLINK
-	imply PTP_1588_CLOCK
-	imply VXLAN
-	imply MLXFW
-	imply PCI_HYPERV_INTERFACE
+	depends on VXLAN || !VXLAN
+	depends on MLXFW || !MLXFW
+	depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
+	depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
 	default n
 	---help---
 	  Core driver for low level functionality of the ConnectX-4 and
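
Note: this Kconfig hunk converts the four "imply" statements into the weak-dependency idiom "depends on FOO || !FOO". For a tristate FOO that expression evaluates to m when FOO=m, so MLX5_CORE can no longer be built-in (=y) while an optional dependency such as VXLAN is a module, a combination that "imply" alone permits and that can leave symbols unresolved at link time.
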
@@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
 		return NULL;
 	}
 
-	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+	tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL);
 	if (!tracer)
 		return ERR_PTR(-ENOMEM);
 
@@ -982,7 +982,7 @@ destroy_workqueue:
 	tracer->dev = NULL;
 	destroy_workqueue(tracer->work_queue);
 free_tracer:
-	kfree(tracer);
+	kvfree(tracer);
 	return ERR_PTR(err);
 }
 
@@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
 	mlx5_fw_tracer_destroy_log_buf(tracer);
 	flush_workqueue(tracer->work_queue);
 	destroy_workqueue(tracer->work_queue);
-	kfree(tracer);
+	kvfree(tracer);
 }
 
 static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
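
The three fw_tracer hunks above (diag/fw_tracer.c) switch the tracer allocation from kzalloc/kfree to kvzalloc/kvfree: the tracer structure is large, and a physically contiguous kmalloc of that size can fail when memory is fragmented, while kvzalloc transparently falls back to vmalloc. A minimal sketch of the pattern, using a hypothetical struct and helper names that are not from the driver:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct big_ctx {
	char strings_db[2 * 1024 * 1024];	/* illustrative large member */
};

static struct big_ctx *big_ctx_create(void)
{
	struct big_ctx *ctx;

	/* kvzalloc() attempts kmalloc() first and falls back to
	 * vmalloc(), so success does not depend on finding a large
	 * physically contiguous region.
	 */
	ctx = kvzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return ctx;
}

static void big_ctx_destroy(struct big_ctx *ctx)
{
	kvfree(ctx);	/* kvfree() frees both kmalloc and vmalloc memory */
}
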
@@ -367,6 +367,7 @@ enum {
 	MLX5E_SQ_STATE_AM,
 	MLX5E_SQ_STATE_TLS,
 	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
+	MLX5E_SQ_STATE_PENDING_XSK_TX,
 };
 
 struct mlx5e_sq_wqe_info {
@@ -960,7 +961,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
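
(The two en.h hunks above add the MLX5E_SQ_STATE_PENDING_XSK_TX state bit and change the mlx5e_poll_ico_cq() prototype from void to int; the matching definition change is in the en_rx.c hunks further down, and the new bit is used by the XSK wakeup fix below.)
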
@@ -12,6 +12,7 @@
 #include <net/flow_offload.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <linux/workqueue.h>
+#include <linux/xarray.h>
 
 #include "esw/chains.h"
 #include "en/tc_ct.h"
@@ -35,7 +36,7 @@ struct mlx5_tc_ct_priv {
 	struct mlx5_eswitch *esw;
 	const struct net_device *netdev;
 	struct idr fte_ids;
-	struct idr tuple_ids;
+	struct xarray tuple_ids;
 	struct rhashtable zone_ht;
 	struct mlx5_flow_table *ct;
 	struct mlx5_flow_table *ct_nat;
@@ -238,7 +239,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 
 	mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
 	mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
-	idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
+	xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
 }
 
 static void
@@ -483,7 +484,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
 	struct mlx5_eswitch *esw = ct_priv->esw;
 	struct mlx5_flow_spec *spec = NULL;
-	u32 tupleid = 1;
+	u32 tupleid;
 	int err;
 
 	zone_rule->nat = nat;
@@ -493,12 +494,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 		return -ENOMEM;
 
 	/* Get tuple unique id */
-	err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
-			    TUPLE_ID_MAX, GFP_KERNEL);
+	err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule,
+		       XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL);
 	if (err) {
 		netdev_warn(ct_priv->netdev,
 			    "Failed to allocate tuple id, err: %d\n", err);
-		goto err_idr_alloc;
+		goto err_xa_alloc;
 	}
 	zone_rule->tupleid = tupleid;
 
@@ -539,8 +540,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
 	mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
 err_mod_hdr:
-	idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
-err_idr_alloc:
+	xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
+err_xa_alloc:
 	kfree(spec);
 	return err;
 }
@@ -1299,7 +1300,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
 	}
 
 	idr_init(&ct_priv->fte_ids);
-	idr_init(&ct_priv->tuple_ids);
+	xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1);
 	mutex_init(&ct_priv->control_lock);
 	rhashtable_init(&ct_priv->zone_ht, &zone_params);
 
@@ -1334,7 +1335,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
 
 	rhashtable_destroy(&ct_priv->zone_ht);
 	mutex_destroy(&ct_priv->control_lock);
-	idr_destroy(&ct_priv->tuple_ids);
+	xa_destroy(&ct_priv->tuple_ids);
 	idr_destroy(&ct_priv->fte_ids);
 	kfree(ct_priv);
 
@@ -1352,7 +1353,7 @@ mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
 	if (!ct_priv || !tupleid)
 		return true;
 
-	zone_rule = idr_find(&ct_priv->tuple_ids, tupleid);
+	zone_rule = xa_load(&ct_priv->tuple_ids, tupleid);
 	if (!zone_rule)
 		return false;
 
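
The tc_ct.c hunks above convert the tuple-id table from an IDR to an allocating XArray: idr_alloc_u32/idr_remove/idr_find/idr_destroy become xa_alloc/xa_erase/xa_load/xa_destroy, and xa_init_flags(..., XA_FLAGS_ALLOC1) together with XA_LIMIT(1, TUPLE_ID_MAX) keeps id 0 unallocated, so it can still mean "no tuple" in mlx5e_tc_ct_restore_flow(). A minimal sketch of the allocating-XArray pattern; the names (my_ids, MY_ID_MAX, the helpers) are illustrative, not from the driver, and DEFINE_XARRAY_ALLOC1 is the static-definition equivalent of the runtime xa_init_flags() call used above:

#include <linux/gfp.h>
#include <linux/xarray.h>

#define MY_ID_MAX 0xffff	/* illustrative upper bound */

/* DEFINE_XARRAY_ALLOC1 creates an allocating XArray whose ids start
 * at 1, leaving 0 free to act as a "no entry" sentinel.
 */
static DEFINE_XARRAY_ALLOC1(my_ids);

static int my_entry_track(void *entry, u32 *id)
{
	/* Stores entry at a free id in [1, MY_ID_MAX] and writes the
	 * chosen id back through @id; returns 0 or a negative errno.
	 */
	return xa_alloc(&my_ids, id, entry, XA_LIMIT(1, MY_ID_MAX),
			GFP_KERNEL);
}

static void *my_entry_lookup(u32 id)
{
	return xa_load(&my_ids, id);	/* NULL if id was never allocated */
}

static void my_entry_untrack(u32 id)
{
	xa_erase(&my_ids, id);
}
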
@@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
 		return 0;
 
+	if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
+		return 0;
+
 	spin_lock(&c->xskicosq_lock);
 	mlx5e_trigger_irq(&c->xskicosq);
 	spin_unlock(&c->xskicosq_lock);
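
The xsk/tx.c hunk above makes redundant wakeups cheap: test_and_set_bit() on the new MLX5E_SQ_STATE_PENDING_XSK_TX flag lets only the first wakeup trigger an IRQ, and until NAPI has polled the resulting completion and cleared the flag, further wakeups return immediately, so the ICOSQ cannot be flooded with work requests. A minimal sketch of this pending-flag pattern, with illustrative names:

#include <linux/bitops.h>

enum { MY_STATE_PENDING_TX };	/* illustrative state bit */

static void my_wakeup(unsigned long *state, void (*trigger_irq)(void))
{
	/* test_and_set_bit() is atomic: exactly one caller sees the old
	 * value 0 and triggers the IRQ; everyone else backs off until
	 * the poller clears the bit again.
	 */
	if (test_and_set_bit(MY_STATE_PENDING_TX, state))
		return;
	trigger_irq();
}

static void my_poll_done(unsigned long *state, int cqes_polled)
{
	/* Clear only when work was actually consumed; keeping the bit
	 * set otherwise throttles wakeups while the queue is still busy.
	 */
	if (cqes_polled)
		clear_bit(MY_STATE_PENDING_TX, state);
}
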
@@ -3583,7 +3583,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 
-	if (!mlx5e_monitor_counter_supported(priv)) {
+	/* In switchdev mode, monitor counters doesn't monitor
+	 * rx/tx stats of 802_3. The update stats mechanism
+	 * should keep the 802_3 layout counters updated
+	 */
+	if (!mlx5e_monitor_counter_supported(priv) ||
+	    mlx5e_is_uplink_rep(priv)) {
 		/* update HW stats in background for next time */
 		mlx5e_queue_update_stats(priv);
 	}
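
The en_main.c hunk widens the fallback path in the get-stats handler: for uplink representors (switchdev mode) the monitor-counters mechanism does not cover the 802.3 rx/tx counters, so the driver now also queues an asynchronous hardware stats update there, keeping those counters fresh for the next query.
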
@@ -589,7 +589,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 	return !!err;
 }
 
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
 	struct mlx5_cqe64 *cqe;
@@ -597,11 +597,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	int i;
 
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return;
+		return 0;
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
 	if (likely(!cqe))
-		return;
+		return 0;
 
 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
 	 * otherwise a cq overrun may occur
@@ -650,6 +650,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	sq->cc = sqcc;
 
 	mlx5_cqwq_update_db_record(&cq->wq);
+
+	return i;
 }
 
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
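
With the two en_rx.c hunks above, mlx5e_poll_ico_cq() now returns the number of CQEs it processed (0 when the SQ is disabled or the CQ is empty), so callers can tell whether the ICOSQ actually made progress; the NAPI hunk below uses that to decide whether it is safe to clear the pending flag.
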
@@ -152,7 +152,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 					     mlx5e_post_rx_wqes,
 					     rq);
 		if (xsk_open) {
-			mlx5e_poll_ico_cq(&c->xskicosq.cq);
+			if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
+				/* Don't clear the flag if nothing was polled to prevent
+				 * queueing more WQEs and overflowing XSKICOSQ.
+				 */
+				clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
 			busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
 			busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
 		}
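
Taken together with the en_txrx.c hunk above: the wakeup path sets MLX5E_SQ_STATE_PENDING_XSK_TX and triggers one IRQ, and NAPI clears the flag only when mlx5e_poll_ico_cq() reports that at least one CQE was consumed. If nothing was polled, the flag stays set, so repeated wakeups cannot queue more WQEs and overflow the XSKICOSQ.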