Merge branch 'mlx5-misc-patches-2024-06-13'
Tariq Toukan says:
====================
mlx5 misc patches 2024-06-13
This patchset contains small code cleanups and enhancements from the
team to the mlx5 core and Eth drivers.
Series generated against:
commit 3ec8d7572a ("CDC-NCM: add support for Apple's private interface")
====================
Link: https://lore.kernel.org/r/20240613210036.1125203-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit ad46951bfb
@@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv {
 	struct rhashtable ct_tuples_nat_ht;
 	struct mlx5_flow_table *ct;
 	struct mlx5_flow_table *ct_nat;
+	struct mlx5_flow_group *ct_nat_miss_group;
+	struct mlx5_flow_handle *ct_nat_miss_rule;
 	struct mlx5e_post_act *post_act;
 	struct mutex control_lock; /* guards parallel adds/dels */
 	struct mapping_ctx *zone_mapping;
@@ -141,6 +143,8 @@ struct mlx5_ct_counter {
 
 enum {
 	MLX5_CT_ENTRY_FLAG_VALID,
+	MLX5_CT_ENTRY_IN_CT_TABLE,
+	MLX5_CT_ENTRY_IN_CT_NAT_TABLE,
 };
 
 struct mlx5_ct_entry {
@@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = {
 };
 
 static bool
-mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry)
 {
-	return !!(entry->tuple_nat_node.next);
+	return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
+}
+
+static bool
+mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry)
+{
+	return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
 }
 
 static int
@@ -526,8 +536,10 @@ static void
 mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
 			   struct mlx5_ct_entry *entry)
 {
-	mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
-	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+	if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+		mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+	if (mlx5_tc_ct_entry_in_ct_table(entry))
+		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
 
 	atomic_dec(&ct_priv->debugfs.stats.offloaded);
 }
@@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 					      &zone_rule->mh,
 					      zone_restore_id,
 					      nat,
-					      mlx5_tc_ct_entry_has_nat(entry));
+					      mlx5_tc_ct_entry_in_ct_nat_table(entry));
 	if (err) {
 		ct_dbg("Failed to create ct entry mod hdr");
 		goto err_mod_hdr;
@@ -888,7 +900,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
 	*old_attr = *attr;
 
 	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
-					      nat, mlx5_tc_ct_entry_has_nat(entry));
+					      nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
 	if (err) {
 		ct_dbg("Failed to create ct entry mod hdr");
 		goto err_mod_hdr;
@@ -957,11 +969,13 @@ static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
 {
 	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
 
-	rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
-			       &entry->tuple_nat_node,
-			       tuples_nat_ht_params);
-	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
-			       tuples_ht_params);
+	if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+				       &entry->tuple_nat_node,
+				       tuples_nat_ht_params);
+	if (mlx5_tc_ct_entry_in_ct_table(entry))
+		rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+				       tuples_ht_params);
 }
 
 static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
@@ -1100,21 +1114,26 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 		return err;
 	}
 
-	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
-					zone_restore_id);
-	if (err)
-		goto err_orig;
+	if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+		err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
+						zone_restore_id);
+		if (err)
+			goto err_orig;
+	}
 
-	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
-					zone_restore_id);
-	if (err)
-		goto err_nat;
+	if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+		err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
+						zone_restore_id);
+		if (err)
+			goto err_nat;
+	}
 
 	atomic_inc(&ct_priv->debugfs.stats.offloaded);
 	return 0;
 
 err_nat:
-	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+	if (mlx5_tc_ct_entry_in_ct_table(entry))
+		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
 err_orig:
 	mlx5_tc_ct_counter_put(ct_priv, entry);
 	return err;
@@ -1128,15 +1147,19 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
 {
 	int err;
 
-	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
-					    zone_restore_id);
-	if (err)
-		return err;
+	if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+		err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+						    zone_restore_id);
+		if (err)
+			return err;
+	}
 
-	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
-					    zone_restore_id);
-	if (err)
-		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+	if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+		err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+						    zone_restore_id);
+		if (err && mlx5_tc_ct_entry_in_ct_table(entry))
+			mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+	}
 	return err;
 }
 
@@ -1224,18 +1247,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 	if (err)
 		goto err_entries;
 
-	err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
-					    &entry->tuple_node,
-					    tuples_ht_params);
-	if (err)
-		goto err_tuple;
-
 	if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
 		err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
 						    &entry->tuple_nat_node,
 						    tuples_nat_ht_params);
 		if (err)
 			goto err_tuple_nat;
+
+		set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
+	}
+
+	if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+		err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
+						    &entry->tuple_node,
+						    tuples_ht_params);
+		if (err)
+			goto err_tuple;
+
+		set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
 	}
 	spin_unlock_bh(&ct_priv->ht_lock);
 
@@ -1251,17 +1280,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 
 err_rules:
 	spin_lock_bh(&ct_priv->ht_lock);
-	if (mlx5_tc_ct_entry_has_nat(entry))
-		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
-				       &entry->tuple_nat_node, tuples_nat_ht_params);
-err_tuple_nat:
-	rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
-			       &entry->tuple_node,
-			       tuples_ht_params);
 err_tuple:
-	rhashtable_remove_fast(&ft->ct_entries_ht,
-			       &entry->node,
-			       cts_ht_params);
+	mlx5_tc_ct_entry_remove_from_tuples(entry);
+err_tuple_nat:
+	rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
 err_entries:
 	spin_unlock_bh(&ct_priv->ht_lock);
 err_set:
@@ -2149,6 +2171,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
 	debugfs_remove_recursive(ct_priv->debugfs.root);
 }
 
+static struct mlx5_flow_handle *
+tc_ct_add_miss_rule(struct mlx5_flow_table *ft,
+		    struct mlx5_flow_table *next_ft)
+{
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act act = {};
+
+	act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = next_ft;
+
+	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from,
+			     struct mlx5_flow_table *to,
+			     struct mlx5_flow_group **miss_group,
+			     struct mlx5_flow_handle **miss_rule)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_group *group;
+	struct mlx5_flow_handle *rule;
+	unsigned int max_fte = from->max_fte;
+	u32 *flow_group_in;
+	int err = 0;
+
+	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+	if (!flow_group_in)
+		return -ENOMEM;
+
+	/* create miss group */
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+		 max_fte - 2);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+		 max_fte - 1);
+	group = mlx5_create_flow_group(from, flow_group_in);
+	if (IS_ERR(group)) {
+		err = PTR_ERR(group);
+		goto err_miss_grp;
+	}
+
+	/* add miss rule to next fdb */
+	rule = tc_ct_add_miss_rule(from, to);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		goto err_miss_rule;
+	}
+
+	*miss_group = group;
+	*miss_rule = rule;
+	kvfree(flow_group_in);
+	return 0;
+
+err_miss_rule:
+	mlx5_destroy_flow_group(group);
+err_miss_grp:
+	kvfree(flow_group_in);
+	return err;
+}
+
+static void
+tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group,
+			     struct mlx5_flow_handle *miss_rule)
+{
+	mlx5_del_flow_rules(miss_rule);
+	mlx5_destroy_flow_group(miss_group);
+}
+
 #define INIT_ERR_PREFIX "tc ct offload init failed"
 
 struct mlx5_tc_ct_priv *
@@ -2212,6 +2304,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 		goto err_ct_nat_tbl;
 	}
 
+	err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct,
+					   &ct_priv->ct_nat_miss_group,
+					   &ct_priv->ct_nat_miss_rule);
+	if (err)
+		goto err_ct_zone_ht;
+
 	ct_priv->post_act = post_act;
 	mutex_init(&ct_priv->control_lock);
 	if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
@@ -2273,6 +2371,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
 	ct_priv->fs_ops->destroy(ct_priv->fs);
 	kfree(ct_priv->fs);
 
+	tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule);
 	mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
 	mlx5_chains_destroy_global_table(chains, ct_priv->ct);
 	mapping_destroy(ct_priv->zone_mapping);
@@ -6,6 +6,8 @@
 
 #include "en.h"
 #include <linux/indirect_call_wrapper.h>
+#include <net/ip6_checksum.h>
+#include <net/tcp.h>
 
 #define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
 
@@ -479,6 +481,41 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 	}
 }
 
+static inline void
+mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
+{
+	const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
+	const struct ipv6hdr *ip6;
+	struct tcphdr *th;
+	struct udphdr *uh;
+	int len;
+
+	if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
+		return;
+
+	if (skb_is_gso_tcp(skb)) {
+		th = inner_tcp_hdr(skb);
+		len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
+
+		if (ip->version == 4) {
+			th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
+		} else {
+			ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+			th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+		}
+	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+		uh = (struct udphdr *)skb_inner_transport_header(skb);
+		len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+		if (ip->version == 4) {
+			uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
+		} else {
+			ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+			uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+		}
+	}
+}
+
 #define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
 
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
@@ -116,6 +116,7 @@ static inline bool
 mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 				  struct mlx5_wqe_eth_seg *eseg)
 {
+	struct mlx5_core_dev *mdev = sq->mdev;
 	u8 inner_ipproto;
 
 	if (!mlx5e_ipsec_eseg_meta(eseg))
@@ -125,9 +126,12 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	inner_ipproto = xfrm_offload(skb)->inner_ipproto;
 	if (inner_ipproto) {
 		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
-		if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+		if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) {
+			mlx5e_swp_encap_csum_partial(mdev, skb, true);
 			eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+		}
 	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		mlx5e_swp_encap_csum_partial(mdev, skb, false);
 		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 		sq->stats->csum_partial_inner++;
 	}
@@ -4910,7 +4910,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 	}
 
 out:
-	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
+	/* Disable CSUM and GSO if skb cannot be offloaded by HW */
 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
@@ -1182,9 +1182,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 		check = csum_partial(tcp, tcp->doff * 4,
 				     csum_unfold((__force __sum16)cqe->check_sum));
 		/* Almost done, don't forget the pseudo header */
-		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
-					       tot_len - sizeof(struct iphdr),
-					       IPPROTO_TCP, check);
+		tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
+					  ipv4->saddr, ipv4->daddr, check);
 	} else {
 		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
 		struct ipv6hdr *ipv6 = ip_p;
@@ -1199,8 +1198,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 		check = csum_partial(tcp, tcp->doff * 4,
 				     csum_unfold((__force __sum16)cqe->check_sum));
 		/* Almost done, don't forget the pseudo header */
-		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
-					     IPPROTO_TCP, check);
+		tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
+					  &ipv6->daddr, check);
 	}
 }
 
@@ -531,7 +531,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
 	switch (type) {
 	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
 		return MLX5_CAP_QOS(dev, esw_element_type) &
-					ELEMENT_TYPE_CAP_MASK_TASR;
+					ELEMENT_TYPE_CAP_MASK_TSAR;
 	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
 		return MLX5_CAP_QOS(dev, esw_element_type) &
 					ELEMENT_TYPE_CAP_MASK_VPORT;
@@ -3353,9 +3353,9 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 
 	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
-		strcpy(ctx->val.vstr, "smfs");
+		strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
 	else
-		strcpy(ctx->val.vstr, "dmfs");
+		strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
 	return 0;
 }
 
@@ -1093,7 +1093,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         tunnel_stateless_ip_over_ip_tx[0x1];
 	u8         reserved_at_2e[0x2];
 	u8         max_vxlan_udp_ports[0x8];
-	u8         reserved_at_38[0x6];
+	u8         swp_csum_l4_partial[0x1];
+	u8         reserved_at_39[0x5];
 	u8         max_geneve_opt_len[0x1];
 	u8         tunnel_stateless_geneve_rx[0x1];
 
@ -3914,7 +3915,7 @@ enum {
|
|||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
|
ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0,
|
||||||
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
|
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
|
||||||
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
|
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
|
||||||
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
|
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
|
||||||
|