diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c44669102626..f2fa1307e90c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -530,6 +530,8 @@ typedef struct sk_buff *
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

+int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
+
 enum mlx5e_rq_flag {
        MLX5E_RQ_FLAG_XDP_XMIT,
        MLX5E_RQ_FLAG_XDP_REDIRECT,
@@ -812,6 +814,13 @@ struct mlx5e_priv {
        struct mlx5e_scratchpad    scratchpad;
 };

+struct mlx5e_rx_handlers {
+       mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+       mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+};
+
+extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
+
 struct mlx5e_profile {
        int     (*init)(struct mlx5_core_dev *mdev,
                        struct net_device *netdev,
@@ -828,58 +837,17 @@ struct mlx5e_profile {
        void    (*update_carrier)(struct mlx5e_priv *priv);
        unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
        mlx5e_stats_grp_t *stats_grps;
-       struct {
-               mlx5e_fp_handle_rx_cqe handle_rx_cqe;
-               mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
-       } rx_handlers;
+       const struct mlx5e_rx_handlers *rx_handlers;
        int     max_tc;
        u8      rq_groups;
 };

 void mlx5e_build_ptys2ethtool_map(void);
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      struct net_device *sb_dev);
-netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-                  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
-
-void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
-void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
-void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
-int mlx5e_napi_poll(struct napi_struct *napi, int budget);
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
-
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params);
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
-                               struct mlx5e_dma_info *dma_info,
-                               bool recycle);
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
-bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-struct sk_buff *
-mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
-struct sk_buff *
-mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx);
-struct sk_buff *
-mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-struct sk_buff *
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-                            struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-
 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -982,8 +950,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
 void mlx5e_activate_rq(struct mlx5e_rq *rq);
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
-void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

@@ -1008,6 +974,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
                       bool enable_mc_lb);
+void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);

 /* common netdev helpers */
 void mlx5e_create_q_counters(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cf425a60cddc..9334c9c3e208 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -5,6 +5,7 @@
 #define __MLX5_EN_TXRX_H___

 #include "en.h"
+#include <linux/indirect_call_wrapper.h>

 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

@@ -18,6 +19,33 @@ enum mlx5e_icosq_wqe_type {
 #endif
 };

+/* General */
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+
+/* RX */
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
+                               struct mlx5e_dma_info *dma_info,
+                               bool recycle);
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+
+/* TX */
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      struct net_device *sb_dev);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+                  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+
 static inline bool
 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 {
@@ -360,7 +388,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
        switch (swp_spec->tun_l4_proto) {
        case IPPROTO_UDP:
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-               /* fall through */
+               fallthrough;
        case IPPROTO_TCP:
                eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
                break;
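Note: the INDIRECT_CALLABLE_DECLARE() pairs above are one half of the indirect-call-wrapper pattern; the other half sits at the call site, where INDIRECT_CALL_2() lets the compiler emit direct calls to the two likely targets instead of a retpoline-protected indirect call. A rough sketch of that caller side, assuming rq->post_wqes holds one of the two declared functions (the wrapper function below is illustrative, not code from this patch):

    /* Hypothetical caller: branch to the two known targets directly,
     * falling back to a plain indirect call for anything else.
     */
    static bool mlx5e_rq_post_wqes(struct mlx5e_rq *rq)
    {
            return INDIRECT_CALL_2(rq->post_wqes,
                                   mlx5e_post_rx_mpwqes, /* striding RQ */
                                   mlx5e_post_rx_wqes,   /* cyclic RQ */
                                   rq);
    }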
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index e0c1b010d41a..0e6946fc121f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -34,7 +34,6 @@
 #include <net/xdp_sock_drv.h>
 #include "en/xdp.h"
 #include "en/params.h"
-#include <linux/indirect_call_wrapper.h>

 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
@@ -153,11 +152,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                return true;
        default:
                bpf_warn_invalid_xdp_action(act);
-               /* fall through */
+               fallthrough;
        case XDP_ABORTED:
 xdp_abort:
                trace_xdp_exception(rq->netdev, prog, act);
-               /* fall through */
+               fallthrough;
        case XDP_DROP:
                rq->stats->xdp_drop++;
                return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index cc46414773b5..dd9df519d383 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -3,6 +3,7 @@

 #include "setup.h"
 #include "en/params.h"
+#include "en/txrx.h"

 /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
  * change unexpectedly, and mlx5e has a minimum valid stride size for striding
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 0dfbc96e952a..4d892f6cecb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -6,7 +6,6 @@
 #include "en/xdp.h"
 #include "en/params.h"
 #include <net/xdp_sock_drv.h>
-#include <linux/indirect_call_wrapper.h>

 int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2a47673da5a4..f96e786db158 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -47,7 +47,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
                                          struct sk_buff *skb, u32 *cqe_bcnt);

-void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_ipsec_inverse_table_init(void);
 bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 0e6698d1b4ca..f4861545b236 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -470,7 +470,7 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
                if (likely(!skb->decrypted))
                        goto out;
                WARN_ON_ONCE(1);
-               /* fall-through */
+               fallthrough;
        case MLX5E_KTLS_SYNC_FAIL:
                goto err_out;
        }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 1e42c7ae621b..a6cf008057b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -60,6 +60,16 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
        mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }

+void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
+{
+       bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
+       bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+       bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
+
+       MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+       MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
+}
+
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
                             struct mlx5_core_mkey *mkey)
 {
@@ -76,7 +86,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);
-
+       mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
        MLX5_SET(mkc, mkc, pd, pdn);
        MLX5_SET(mkc, mkc, length64, 1);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
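Note: mlx5e_mkey_set_relaxed_ordering() enables each relaxed-ordering direction only when both the PCIe side reports it usable (pcie_relaxed_ordering_enabled()) and firmware advertises the matching capability. A hedged sketch of how another mkey-creation path would pick the helper up (the function below is hypothetical, condensed from the two real call sites in this patch):

    /* Illustrative only: mkc is the mkey context inside a create_mkey
     * command buffer; the helper fills relaxed_ordering_read/write.
     */
    static void mlx5e_init_dma_mkc(struct mlx5_core_dev *mdev, void *mkc, u32 pdn)
    {
            MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
            MLX5_SET(mkc, mkc, lw, 1);
            MLX5_SET(mkc, mkc, lr, 1);
            mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
            MLX5_SET(mkc, mkc, pd, pdn);
            MLX5_SET(mkc, mkc, qpn, 0xffffff);
    }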
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index af849bc83c30..08270987c506 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -243,7 +243,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
                return MLX5E_NUM_PFLAGS;
        case ETH_SS_TEST:
                return mlx5e_self_test_num(priv);
-               /* fallthrough */
+       fallthrough;
        default:
                return -EOPNOTSUPP;
        }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9d5d8b28bcd8..f374348fa810 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,7 +45,6 @@
 #include "en_tc.h"
 #include "en_rep.h"
 #include "en_accel/ipsec.h"
-#include "en_accel/ipsec_rxtx.h"
 #include "en_accel/en_accel.h"
 #include "en_accel/tls.h"
 #include "accel/ipsec.h"
@@ -65,7 +64,6 @@
 #include "en/hv_vhca_stats.h"
 #include "en/devlink.h"
 #include "lib/mlx5.h"
-#include "fpga/ipsec.h"

 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
@@ -276,7 +274,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
-
+       mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
        MLX5_SET64(mkc, mkc, len, npages << page_shift);
@@ -428,29 +426,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
                        mlx5e_mpwqe_get_log_rq_size(params, xsk);

-               rq->post_wqes = mlx5e_post_rx_mpwqes;
-               rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
-
-               rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
-               if (MLX5_IPSEC_DEV(mdev)) {
-                       err = -EINVAL;
-                       netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
-                       goto err_rq_wq_destroy;
-               }
-#endif
-               if (!rq->handle_rx_cqe) {
-                       err = -EINVAL;
-                       netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
-                       goto err_rq_wq_destroy;
-               }
-
-               rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
-                       mlx5e_xsk_skb_from_cqe_mpwrq_linear :
-                       mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
-                               mlx5e_skb_from_cqe_mpwrq_linear :
-                               mlx5e_skb_from_cqe_mpwrq_nonlinear;
-
                rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
                rq->mpwqe.num_strides =
                        BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
@@ -492,30 +467,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                if (err)
                        goto err_free;

-               rq->post_wqes = mlx5e_post_rx_wqes;
-               rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-#ifdef CONFIG_MLX5_EN_IPSEC
-               if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
-                   c->priv->ipsec)
-                       rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
-               else
-#endif
-                       rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
-               if (!rq->handle_rx_cqe) {
-                       err = -EINVAL;
-                       netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
-                       goto err_free;
-               }
-
-               rq->wqe.skb_from_cqe = xsk ?
-                       mlx5e_xsk_skb_from_cqe_linear :
-                       mlx5e_rx_is_linear_skb(params, NULL) ?
-                               mlx5e_skb_from_cqe_linear :
-                               mlx5e_skb_from_cqe_nonlinear;
-
                rq->mkey_be = c->mkey_be;
        }

+       err = mlx5e_rq_set_handlers(rq, params, xsk);
+       if (err)
+               goto err_free;
+
        if (xsk) {
                err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                 MEM_TYPE_XSK_BUFF_POOL, NULL);
@@ -5288,8 +5246,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
        .update_rx         = mlx5e_update_nic_rx,
        .update_stats      = mlx5e_update_ndo_stats,
        .update_carrier    = mlx5e_update_carrier,
-       .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
-       .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+       .rx_handlers       = &mlx5e_rx_handlers_nic,
        .max_tc            = MLX5E_MAX_NUM_TC,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(XSK),
        .stats_grps        = mlx5e_nic_stats_grps,
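Note: after this change a profile no longer embeds the two handler pointers; it points at a const table that lives next to the (now static) handler implementations. A sketch of what a hypothetical new profile would add (mlx5e_rx_handlers_foo and the foo handler are illustrative names, not part of the patch):

    /* In the .c file that implements the handlers: */
    static void mlx5e_handle_rx_cqe_foo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

    const struct mlx5e_rx_handlers mlx5e_rx_handlers_foo = {
            .handle_rx_cqe       = mlx5e_handle_rx_cqe_foo,
            .handle_rx_cqe_mpwqe = NULL, /* no striding-RQ support in this sketch */
    };

    /* In the profile definition: */
    static const struct mlx5e_profile mlx5e_foo_profile = {
            /* ... */
            .rx_handlers = &mlx5e_rx_handlers_foo,
    };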
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c300729fb498..111477086f66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -42,6 +42,7 @@
 #include "esw/chains.h"
 #include "en.h"
 #include "en_rep.h"
+#include "en/txrx.h"
 #include "en_tc.h"
 #include "en/rep/tc.h"
 #include "en/rep/neigh.h"
@@ -699,8 +700,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;

+       SET_NETDEV_DEV(netdev, mdev->device);
        if (rep->vport == MLX5_VPORT_UPLINK) {
-               SET_NETDEV_DEV(netdev, mdev->device);
                netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
                /* we want a persistent mac for the uplink rep */
                mlx5_query_mac_address(mdev, netdev->dev_addr);
@@ -1143,8 +1144,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
        .enable            = mlx5e_rep_enable,
        .update_rx         = mlx5e_update_rep_rx,
        .update_stats      = mlx5e_update_ndo_stats,
-       .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
-       .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
+       .rx_handlers       = &mlx5e_rx_handlers_rep,
        .max_tc            = 1,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps        = mlx5e_rep_stats_grps,
@@ -1163,8 +1163,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
        .update_rx         = mlx5e_update_rep_rx,
        .update_stats      = mlx5e_update_ndo_stats,
        .update_carrier    = mlx5e_update_carrier,
-       .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
-       .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
+       .rx_handlers       = &mlx5e_rx_handlers_rep,
        .max_tc            = MLX5E_MAX_NUM_TC,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps        = mlx5e_ul_rep_stats_grps,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 1d5669801484..622c27ae4ac7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -41,6 +41,8 @@
 #include "lib/port_tun.h"

 #ifdef CONFIG_MLX5_ESWITCH
+extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep;
+
 struct mlx5e_neigh_update_table {
        struct rhashtable       neigh_ht;
        /* Save the neigh hash entries in a list in addition to the hash table
@@ -223,10 +225,6 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);

-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
-                                  struct mlx5_cqe64 *cqe);
-
 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);

 bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 74860f3827b1..65828af120b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,22 +34,39 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
-#include <linux/indirect_call_wrapper.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
 #include "en.h"
+#include "en/txrx.h"
 #include "en_tc.h"
 #include "eswitch.h"
 #include "en_rep.h"
 #include "en/rep/tc.h"
 #include "ipoib/ipoib.h"
+#include "accel/ipsec.h"
+#include "fpga/ipsec.h"
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/tls_rxtx.h"
 #include "lib/clock.h"
 #include "en/xdp.h"
 #include "en/xsk/rx.h"
 #include "en/health.h"
+#include "en/params.h"
+
+static struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+static struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
+       .handle_rx_cqe       = mlx5e_handle_rx_cqe,
+       .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+};

 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 {
@@ -370,7 +387,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
        mlx5e_put_rx_frag(rq, wi, recycle);
 }

-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 {
        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

@@ -537,14 +554,14 @@ err:
        return err;
 }

-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];

        /* Don't recycle, this function is called on rq/netdev close */
        mlx5e_free_rx_mpwqe(rq, wi, false);
 }

-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        u8 wqe_bulk;
@@ -685,7 +702,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
        return i;
 }

-bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
+INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 {
        struct mlx5e_icosq *sq = &rq->channel->icosq;
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
@@ -1106,7 +1123,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
        xdp->frame_sz = rq->buff.frame0_sz;
 }

-struct sk_buff *
+static struct sk_buff *
 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
 {
@@ -1146,7 +1163,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
        return skb;
 }

-struct sk_buff *
+static struct sk_buff *
 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
 {
@@ -1201,7 +1218,7 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        }
 }

-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
@@ -1244,7 +1261,7 @@ wq_cyc_pop:
 }

 #ifdef CONFIG_MLX5_ESWITCH
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        struct net_device *netdev = rq->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1299,8 +1316,7 @@ wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
 }

-void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
-                                  struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
@@ -1358,9 +1374,14 @@ mpwrq_cqe_out:
        mlx5e_free_rx_mpwqe(rq, wi, true);
        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
+
+const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
+       .handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
+       .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
+};
 #endif

-struct sk_buff *
+static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
 {
@@ -1406,7 +1427,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
        return skb;
 }

-struct sk_buff *
+static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                u16 cqe_bcnt, u32 head_offset, u32 page_idx)
 {
@@ -1456,7 +1477,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
        return skb;
 }

-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
@@ -1652,7 +1673,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
        stats->bytes += cqe_bcnt;
 }

-void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
@@ -1688,11 +1709,15 @@ wq_free_wqe:
        mlx5_wq_cyc_pop(wq);
 }

+const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
+       .handle_rx_cqe       = mlx5i_handle_rx_cqe,
+       .handle_rx_cqe_mpwqe = NULL, /* Not supported */
+};
 #endif /* CONFIG_MLX5_CORE_IPOIB */

 #ifdef CONFIG_MLX5_EN_IPSEC

-void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
@@ -1729,3 +1754,55 @@ wq_free_wqe:
 }

 #endif /* CONFIG_MLX5_EN_IPSEC */
+
+int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
+{
+       struct mlx5_core_dev *mdev = rq->mdev;
+       struct mlx5e_channel *c = rq->channel;
+
+       switch (rq->wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
+                       mlx5e_xsk_skb_from_cqe_mpwrq_linear :
+                       mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
+                               mlx5e_skb_from_cqe_mpwrq_linear :
+                               mlx5e_skb_from_cqe_mpwrq_nonlinear;
+               rq->post_wqes = mlx5e_post_rx_mpwqes;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+
+               rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+#ifdef CONFIG_MLX5_EN_IPSEC
+               if (MLX5_IPSEC_DEV(mdev)) {
+                       netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
+                       return -EINVAL;
+               }
+#endif
+               if (!rq->handle_rx_cqe) {
+                       netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
+                       return -EINVAL;
+               }
+               break;
+       default: /* MLX5_WQ_TYPE_CYCLIC */
+               rq->wqe.skb_from_cqe = xsk ?
+                       mlx5e_xsk_skb_from_cqe_linear :
+                       mlx5e_rx_is_linear_skb(params, NULL) ?
+                               mlx5e_skb_from_cqe_linear :
+                               mlx5e_skb_from_cqe_nonlinear;
+               rq->post_wqes = mlx5e_post_rx_wqes;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+               if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
+                   c->priv->ipsec)
+                       rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
+               else
+#endif
+                       rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
+               if (!rq->handle_rx_cqe) {
+                       netdev_err(c->netdev, "RX handler of RQ is not set\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e3dbab2a294c..de10b06bade5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,8 +31,8 @@
  */

 #include <linux/irq.h>
-#include <linux/indirect_call_wrapper.h>
 #include "en.h"
+#include "en/txrx.h"
 #include "en/xdp.h"
 #include "en/xsk/rx.h"
 #include "en/xsk/tx.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b68e02ad65e2..1f52b329e915 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -271,7 +271,6 @@ struct mlx5_eswitch {
        struct mlx5_esw_offload offloads;
        int                     mode;
-       int                     nvports;
        u16                     manager_vport;
        u16                     first_host_vport;
        struct mlx5_esw_functions esw_funcs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index db856d70c4f8..be610d40749a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1132,7 +1132,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
        }
 }

-static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table_attr ft_attr = {};
@@ -1165,7 +1165,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
                goto ns_err;
        }

-       table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
+       table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
                MLX5_ESW_MISS_FLOWS + esw->total_vports;

        /* create the slow path fdb with encap set, so further table instances
@@ -1202,7 +1202,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

-       ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
+       ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

@@ -1270,7 +1270,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
        if (err)
                goto miss_rule_err;

-       esw->nvports = nvports;
        kvfree(flow_group_in);
        return 0;

@@ -1311,7 +1310,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
                                               MLX5_FLOW_STEERING_MODE_DMFS);
 }

-static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_offloads_table(struct mlx5_eswitch *esw)
 {
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
@@ -1325,7 +1324,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
                return -EOPNOTSUPP;
        }

-       ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
+       ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
        ft_attr.prio = 1;

        ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1346,14 +1345,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
        mlx5_destroy_flow_table(offloads->ft_offloads);
 }

-static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
+       int nvports;
        int err = 0;

-       nvports = nvports + MLX5_ESW_MISS_FLOWS;
+       nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;
@@ -1986,15 +1986,8 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)

 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 {
-       int num_vfs = esw->esw_funcs.num_vfs;
-       int total_vports;
        int err;

-       if (mlx5_core_is_ecpf_esw_manager(esw->dev))
-               total_vports = esw->total_vports;
-       else
-               total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
-
        memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
        mutex_init(&esw->fdb_table.offloads.vports.lock);
        hash_init(esw->fdb_table.offloads.vports.table);
@@ -2003,7 +1996,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
        if (err)
                goto create_acl_err;

-       err = esw_create_offloads_table(esw, total_vports);
+       err = esw_create_offloads_table(esw);
        if (err)
                goto create_offloads_err;

@@ -2011,11 +2004,11 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
        if (err)
                goto create_restore_err;

-       err = esw_create_offloads_fdb_tables(esw, total_vports);
+       err = esw_create_offloads_fdb_tables(esw);
        if (err)
                goto create_fdb_err;

-       err = esw_create_vport_rx_group(esw, total_vports);
+       err = esw_create_vport_rx_group(esw);
        if (err)
                goto create_fg_err;

@@ -2353,7 +2346,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
                        goto out;
-               /* fall through */
+               fallthrough;
        case MLX5_CAP_INLINE_MODE_L2:
                NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
                err = -EOPNOTSUPP;
@@ -2465,13 +2458,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,

        esw->offloads.encap = encap;

-       err = esw_create_offloads_fdb_tables(esw, esw->nvports);
+       err = esw_create_offloads_fdb_tables(esw);

        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Failed re-creating fast FDB table");
                esw->offloads.encap = !encap;
-               (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
+               (void)esw_create_offloads_fdb_tables(esw);
        }

 unlock:
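Note: with the nvports parameter and the cached esw->nvports gone, every offloads table is sized from esw->total_vports alone, so a table created at steering init and one re-created later (e.g. from the encap toggle path above) come out the same size. The resulting arithmetic, written out as a sketch (the macros are the driver's own constants):

    /* Fast-path FDB: per-vport SQ rules, PF SQ rules, miss flows, plus
     * one extra entry per vport.
     */
    table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
                 MLX5_ESW_MISS_FLOWS + esw->total_vports;

    /* RX offloads table: one entry per vport plus the miss flows. */
    ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;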
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 182d3ac3e73f..831d2c39e153 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -339,14 +339,14 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
        switch (opcode) {
        case MLX5_CQE_REQ_ERR:
                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
-               /* Fall through */
+               fallthrough;
        case MLX5_CQE_REQ:
                mlx5_fpga_conn_sq_cqe(conn, cqe, status);
                break;

        case MLX5_CQE_RESP_ERR:
                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
-               /* Fall through */
+               fallthrough;
        case MLX5_CQE_RESP_SEND:
                mlx5_fpga_conn_rq_cqe(conn, cqe, status);
                break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 690b822c6152..5763965d5ef3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -464,8 +464,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
        .update_rx         = mlx5i_update_nic_rx,
        .update_stats      = NULL, /* mlx5i_update_stats */
        .update_carrier    = NULL, /* no HW update in IB link */
-       .rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
-       .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+       .rx_handlers       = &mlx5i_rx_handlers,
        .max_tc            = MLX5I_MAX_NUM_TC,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps        = mlx5i_stats_grps,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 79071a15c4ca..b79dc1e28c41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -42,6 +42,7 @@

 extern const struct ethtool_ops mlx5i_ethtool_ops;
 extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
+extern const struct mlx5e_rx_handlers mlx5i_rx_handlers;

 #define MLX5_IB_GRH_BYTES       40
 #define MLX5_IPOIB_ENCAP_LEN    4
@@ -117,7 +118,6 @@ struct mlx5i_tx_wqe {

 void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
-void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);

 #endif /* CONFIG_MLX5_CORE_IPOIB */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index f70367018862..7163d9f6c4a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -349,8 +349,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
        .disable           = NULL,
        .update_rx         = mlx5i_update_nic_rx,
        .update_stats      = NULL,
-       .rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
-       .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+       .rx_handlers       = &mlx5i_rx_handlers,
        .max_tc            = MLX5I_MAX_NUM_TC,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index e9089a793632..9e68f5926ab6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -198,13 +198,13 @@ static void mlx5_lag_fib_update(struct work_struct *work)
        /* Protect internal structures from changes */
        rtnl_lock();
        switch (fib_work->event) {
-       case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+       case FIB_EVENT_ENTRY_REPLACE:
        case FIB_EVENT_ENTRY_DEL:
                mlx5_lag_fib_route_event(ldev, fib_work->event,
                                         fib_work->fen_info.fi);
                fib_info_put(fib_work->fen_info.fi);
                break;
-       case FIB_EVENT_NH_ADD: /* fall through */
+       case FIB_EVENT_NH_ADD:
        case FIB_EVENT_NH_DEL:
                fib_nh = fib_work->fnh_info.fib_nh;
                mlx5_lag_fib_nexthop_event(ldev,
@@ -255,7 +255,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
                return NOTIFY_DONE;

        switch (event) {
-       case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+       case FIB_EVENT_ENTRY_REPLACE:
        case FIB_EVENT_ENTRY_DEL:
                fen_info = container_of(info, struct fib_entry_notifier_info,
                                        info);
@@ -278,7 +278,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
                 */
                fib_info_hold(fib_work->fen_info.fi);
                break;
-       case FIB_EVENT_NH_ADD: /* fall through */
+       case FIB_EVENT_NH_ADD:
        case FIB_EVENT_NH_DEL:
                fnh_info = container_of(info, struct fib_nh_notifier_info,
                                        info);
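Note on the fallthrough conversions: fallthrough; expands to a statement-level attribute (__attribute__((__fallthrough__)) where the compiler supports it), so it replaces the old comment only where a statement is legal, i.e. after at least one statement in a case body. Between directly adjacent case labels the fall-through is implicit and the comment is simply dropped, which is why the lag_mp.c hunks above differ from, say, the vport.c hunk further down. A minimal sketch with hypothetical handlers:

    switch (event) {
    case FIB_EVENT_ENTRY_REPLACE:   /* adjacent labels: implicit    */
    case FIB_EVENT_ENTRY_DEL:       /* fall-through, no macro needed */
            handle_entry(event);
            break;
    case FIB_EVENT_NH_ADD:
            prepare_nh();           /* a statement precedes the      */
            fallthrough;            /* fall-through: annotate it     */
    case FIB_EVENT_NH_DEL:
            handle_nh(event);
            break;
    }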
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 5ddd18639a1e..a4a23a27c368 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -35,6 +35,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
+#include <linux/xarray.h>
 #include "mlx5_core.h"
 #include "lib/eq.h"

@@ -73,15 +74,45 @@ enum {
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };

+static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+{
+       struct rb_root *root;
+       int err;
+
+       root = xa_load(&dev->priv.page_root_xa, func_id);
+       if (root)
+               return root;
+
+       root = kzalloc(sizeof(*root), GFP_KERNEL);
+       if (!root)
+               return ERR_PTR(-ENOMEM);
+
+       err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+       if (err) {
+               kfree(root);
+               return ERR_PTR(err);
+       }
+
+       *root = RB_ROOT;
+
+       return root;
+}
+
 static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
 {
-       struct rb_root *root = &dev->priv.page_root;
-       struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;
+       struct rb_root *root;
+       struct rb_node **new;
        struct fw_page *nfp;
        struct fw_page *tfp;
        int i;

+       root = page_root_per_func_id(dev, func_id);
+       if (IS_ERR(root))
+               return PTR_ERR(root);
+
+       new = &root->rb_node;
+
        while (*new) {
                parent = *new;
                tfp = rb_entry(parent, struct fw_page, rb_node);
@@ -111,13 +142,20 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
        return 0;
 }

-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
+                                   u32 func_id)
 {
-       struct rb_root *root = &dev->priv.page_root;
-       struct rb_node *tmp = root->rb_node;
        struct fw_page *result = NULL;
+       struct rb_root *root;
+       struct rb_node *tmp;
        struct fw_page *tfp;

+       root = xa_load(&dev->priv.page_root_xa, func_id);
+       if (WARN_ON_ONCE(!root))
+               return NULL;
+
+       tmp = root->rb_node;
+
        while (tmp) {
                tfp = rb_entry(tmp, struct fw_page, rb_node);
                if (tfp->addr < addr) {
@@ -191,7 +229,13 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
 static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
                     bool in_free_list)
 {
-       rb_erase(&fwp->rb_node, &dev->priv.page_root);
+       struct rb_root *root;
+
+       root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+       if (WARN_ON_ONCE(!root))
+               return;
+
+       rb_erase(&fwp->rb_node, root);
        if (in_free_list)
                list_del(&fwp->list);
        dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
@@ -200,12 +244,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
        kfree(fwp);
 }

-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
 {
        struct fw_page *fwp;
        int n;

-       fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+       fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
        if (!fwp) {
                mlx5_core_warn_rl(dev, "page not found\n");
                return;
@@ -340,7 +384,7 @@ retry:

 out_4k:
        for (i--; i >= 0; i--)
-               free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+               free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
 out_free:
        kvfree(in);
        if (notify_fail)
@@ -351,16 +395,19 @@ out_free:
 static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
                              bool ec_function)
 {
+       struct rb_root *root;
        struct rb_node *p;
        int npages = 0;

-       p = rb_first(&dev->priv.page_root);
+       root = xa_load(&dev->priv.page_root_xa, func_id);
+       if (WARN_ON_ONCE(!root))
+               return;
+
+       p = rb_first(root);
        while (p) {
                struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

                p = rb_next(p);
-               if (fwp->func_id != func_id)
-                       continue;
                npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
                free_fwp(dev, fwp, fwp->free_count);
        }
@@ -378,6 +425,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             u32 *in, int in_size, u32 *out, int out_size)
 {
+       struct rb_root *root;
        struct fw_page *fwp;
        struct rb_node *p;
        u32 func_id;
@@ -391,12 +439,14 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
        func_id = MLX5_GET(manage_pages_in, in, function_id);

-       p = rb_first(&dev->priv.page_root);
+       root = xa_load(&dev->priv.page_root_xa, func_id);
+       if (WARN_ON_ONCE(!root))
+               return -EEXIST;
+
+       p = rb_first(root);
        while (p && i < npages) {
                fwp = rb_entry(p, struct fw_page, rb_node);
                p = rb_next(p);
-               if (fwp->func_id != func_id)
-                       continue;

                MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
                i++;
@@ -430,7 +480,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

-       mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+       mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
+                     func_id, npages, outlen);
        err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
@@ -446,7 +497,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        }

        for (i = 0; i < num_claimed; i++)
-               free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+               free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);

        if (nclaimed)
                *nclaimed = num_claimed;
@@ -560,35 +611,49 @@ static int optimal_reclaimed_pages(void)
        return ret;
 }

-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+                                  struct rb_root *root, u16 func_id)
 {
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-       struct fw_page *fwp;
-       struct rb_node *p;
-       int nclaimed = 0;
-       int err = 0;

-       do {
-               p = rb_first(&dev->priv.page_root);
-               if (p) {
-                       fwp = rb_entry(p, struct fw_page, rb_node);
-                       err = reclaim_pages(dev, fwp->func_id,
-                                           optimal_reclaimed_pages(),
-                                           &nclaimed, mlx5_core_is_ecpf(dev));
+       while (!RB_EMPTY_ROOT(root)) {
+               int nclaimed;
+               int err;

-                       if (err) {
-                               mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-                                              err);
-                               return err;
-                       }
-                       if (nclaimed)
-                               end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+               err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+                                   &nclaimed, mlx5_core_is_ecpf(dev));
+               if (err) {
+                       mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+                                      err, func_id);
+                       return err;
                }
+
+               if (nclaimed)
+                       end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
-       } while (p);
+       }
+
+       return 0;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+       struct rb_root *root;
+       unsigned long id;
+       void *entry;
+
+       xa_for_each(&dev->priv.page_root_xa, id, entry) {
+               root = entry;
+               mlx5_reclaim_root_pages(dev, root, id);
+               xa_erase(&dev->priv.page_root_xa, id);
+               kfree(root);
+       }
+
+       WARN_ON(!xa_empty(&dev->priv.page_root_xa));

        WARN(dev->priv.fw_pages,
             "FW pages counter is %d after reclaiming all pages\n",
@@ -605,17 +670,19 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)

 int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
 {
-       dev->priv.page_root = RB_ROOT;
        INIT_LIST_HEAD(&dev->priv.free_list);
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

+       xa_init(&dev->priv.page_root_xa);
+
        return 0;
 }

 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
 {
+       xa_destroy(&dev->priv.page_root_xa);
        destroy_workqueue(dev->priv.pg_wq);
 }
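Note: the pagealloc conversion keeps one rb_root per function id in an xarray. Only the insert path (page_root_per_func_id() above) allocates a tree on demand; every free/reclaim path uses a plain xa_load() and treats a missing root as a driver bug. A hedged sketch of that read-side discipline, which the patch repeats in find_fw_page(), free_fwp(), release_all_pages() and reclaim_pages_cmd():

    /* Sketch: lookup without allocation. A missing root on a free or
     * reclaim path means pages were never inserted for this func_id.
     */
    struct rb_root *root = xa_load(&dev->priv.page_root_xa, func_id);

    if (WARN_ON_ONCE(!root))
            return; /* or an error code, matching the caller */

    /* ... then rb_first(root) / rb_erase(..., root) as in the hunks above ... */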
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 31abcbb95ca2..c63f727273d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -395,7 +395,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
        /* Check that all mask fields were consumed */
        for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
                if (((u8 *)&mask)[i] != 0) {
-                       mlx5dr_err(dmn, "Mask contains unsupported parameters\n");
+                       mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
                        return -EOPNOTSUPP;
                }
        }
@@ -474,14 +474,13 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
        int ret;

        next_matcher = NULL;
-       if (!list_empty(&tbl->matcher_list))
-               list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
-                       if (tmp_matcher->prio >= matcher->prio) {
-                               next_matcher = tmp_matcher;
-                               break;
-                       }
-                       first = false;
+       list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
+               if (tmp_matcher->prio >= matcher->prio) {
+                       next_matcher = tmp_matcher;
+                       break;
                }
+               first = false;
+       }

        prev_matcher = NULL;
        if (next_matcher && !first)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index cd708dcc2e3a..6ec5106bc472 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -574,9 +574,8 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
 {
        struct mlx5dr_rule_member *rule_mem;

-       if (!list_empty(&ste->rule_list))
-               list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
-                       rule_mem->ste = new_ste;
+       list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
+               rule_mem->ste = new_ste;
 }

 static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 88cdb9bb4c4a..bdafc85fd874 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -110,7 +110,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
                        break;
-               /* fall through */
+               fallthrough;
        case MLX5_CAP_INLINE_MODE_L2:
                *min_inline_mode = MLX5_INLINE_MODE_L2;
                break;
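Note: the two steering cleanups above lean on the fact that list_for_each_entry() simply executes zero iterations on an empty list, so the list_empty() pre-checks were redundant. A minimal sketch:

    /* Sketch: the loop body never runs when the list is empty. */
    LIST_HEAD(lst);
    struct item { struct list_head node; int v; } *it;

    list_for_each_entry(it, &lst, node)
            it->v = 0; /* not reached for an empty lst */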
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6a97ad601991..a0fcc4d13e93 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -541,7 +541,7 @@ struct mlx5_priv {
        /* pages stuff */
        struct mlx5_nb          pg_nb;
        struct workqueue_struct *pg_wq;
-       struct rb_root          page_root;
+       struct xarray           page_root_xa;
        int                     fw_pages;
        atomic_t                reg_pages;
        struct list_head        free_list;