From b47dddc624efbc6c31afe0f4ee415ccbdeb0e4ce Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Sun, 6 Nov 2022 12:50:41 +0200 Subject: [PATCH 01/15] net/mlx5: DR, Move ACTION_CACHE_LINE_SIZE macro to header Move ACTION_CACHE_LINE_SIZE macro to header to be used by the pattern functions as well. Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/steering/dr_action.c | 10 ++++------ .../ethernet/mellanox/mlx5/core/steering/dr_types.h | 1 + 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index ee104cf04392..724e4d9e70ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1365,8 +1365,6 @@ out_err: return -EINVAL; } -#define ACTION_CACHE_LINE_SIZE 64 - static int dr_action_create_reformat_action(struct mlx5dr_domain *dmn, u8 reformat_param_0, u8 reformat_param_1, @@ -1403,13 +1401,13 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, } case DR_ACTION_TYP_TNL_L3_TO_L2: { - u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {}; + u8 hw_actions[DR_ACTION_CACHE_LINE_SIZE] = {}; int ret; ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx, data, data_sz, hw_actions, - ACTION_CACHE_LINE_SIZE, + DR_ACTION_CACHE_LINE_SIZE, &action->rewrite->num_of_actions); if (ret) { mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n"); @@ -1427,7 +1425,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr (action->rewrite->chunk) - dmn->info.caps.hdr_modify_icm_addr) / - ACTION_CACHE_LINE_SIZE; + DR_ACTION_CACHE_LINE_SIZE; ret = mlx5dr_send_postsend_action(dmn, action); if (ret) { @@ -2006,7 +2004,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, action->rewrite->num_of_actions = num_hw_actions; action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) - dmn->info.caps.hdr_modify_icm_addr) / - ACTION_CACHE_LINE_SIZE; + DR_ACTION_CACHE_LINE_SIZE; ret = mlx5dr_send_postsend_action(dmn, action); if (ret) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 5b9faa714f42..7453fc6df494 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -21,6 +21,7 @@ #define DR_NUM_OF_FLEX_PARSERS 8 #define DR_STE_MAX_FLEX_0_ID 3 #define DR_STE_MAX_FLEX_1_ID 7 +#define DR_ACTION_CACHE_LINE_SIZE 64 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg) #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg) From da5d0027d666697f28af65fd0d975e9c7e2327c6 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Sun, 6 Nov 2022 12:52:23 +0200 Subject: [PATCH 02/15] net/mlx5: DR, Add cache for modify header pattern Starting with ConnectX-6 Dx, we use new design of modify_header FW object. The current modify_header object allows for having only limited number of FW objects, so the new design of pattern and argument allows pattern reuse, saving memory, and having a large number of modify_header objects. 
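For clarity, a minimal sketch of how the cache API added here is meant to be consumed (the caller below is hypothetical and for illustration only; "mgr", "num_of_actions" and "data" stand for a valid pattern manager and the caller's raw modify-header action list - the real callers are wired up by later patches in this series):

	struct mlx5dr_ptrn_obj *ptrn;

	/* Returns a cached pattern object when an equivalent pattern already
	 * exists; otherwise allocates an ICM chunk for it, writes the masked
	 * pattern to HW and adds it to the cache list.
	 */
	ptrn = mlx5dr_ptrn_cache_get_pattern(mgr, num_of_actions, data);
	if (!ptrn)
		return -ENOMEM;

	/* ... ptrn->index is later used when building the STE ... */

	/* Drops the refcount; the pattern is freed once no action uses it. */
	mlx5dr_ptrn_cache_put_pattern(mgr, ptrn);
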
Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_ptrn.c | 198 ++++++++++++++++++ .../mellanox/mlx5/core/steering/dr_send.c | 20 ++ .../mellanox/mlx5/core/steering/dr_types.h | 18 ++ 3 files changed, 236 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c index 698e79d278bf..13e06a6a6b22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c @@ -2,12 +2,198 @@ // Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. #include "dr_types.h" +#include "mlx5_ifc_dr_ste_v1.h" + +enum dr_ptrn_modify_hdr_action_id { + DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00, + DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05, + DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06, + DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07, + DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a, +}; struct mlx5dr_ptrn_mgr { struct mlx5dr_domain *dmn; struct mlx5dr_icm_pool *ptrn_icm_pool; + /* cache for modify_header ptrn */ + struct list_head ptrn_list; + struct mutex modify_hdr_mutex; /* protect the pattern cache */ }; +/* Cache structure and functions */ +static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions, + __be64 cur_hw_actions[], + size_t num_of_actions, + __be64 hw_actions[]) +{ + int i; + + if (cur_num_of_actions != num_of_actions) + return false; + + for (i = 0; i < num_of_actions; i++) { + u8 action_id = + MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id); + + if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) { + if (hw_actions[i] != cur_hw_actions[i]) + return false; + } else { + if ((__force __be32)hw_actions[i] != + (__force __be32)cur_hw_actions[i]) + return false; + } + } + + return true; +} + +static struct mlx5dr_ptrn_obj * +dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr, + size_t num_of_actions, + __be64 hw_actions[]) +{ + struct mlx5dr_ptrn_obj *cached_pattern; + struct mlx5dr_ptrn_obj *tmp; + + list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) { + if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions, + (__be64 *)cached_pattern->data, + num_of_actions, + hw_actions)) { + /* Put this pattern in the head of the list, + * as we will probably use it more. 
+ */ + list_del_init(&cached_pattern->list); + list_add(&cached_pattern->list, &mgr->ptrn_list); + return cached_pattern; + } + } + + return NULL; +} + +static struct mlx5dr_ptrn_obj * +dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr, + u16 num_of_actions, u8 *data) +{ + struct mlx5dr_ptrn_obj *pattern; + struct mlx5dr_icm_chunk *chunk; + u32 chunk_size; + u32 index; + + chunk_size = ilog2(num_of_actions); + /* HW modify action index granularity is at least 64B */ + chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8); + + chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size); + if (!chunk) + return NULL; + + index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) - + mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) / + DR_ACTION_CACHE_LINE_SIZE; + + pattern = kzalloc(sizeof(*pattern), GFP_KERNEL); + if (!pattern) + goto free_chunk; + + pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE * + sizeof(*pattern->data), GFP_KERNEL); + if (!pattern->data) + goto free_pattern; + + memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE); + pattern->chunk = chunk; + pattern->index = index; + pattern->num_of_actions = num_of_actions; + + list_add(&pattern->list, &mgr->ptrn_list); + refcount_set(&pattern->refcount, 1); + + return pattern; + +free_pattern: + kfree(pattern); +free_chunk: + mlx5dr_icm_free_chunk(chunk); + return NULL; +} + +static void +dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern) +{ + list_del(&pattern->list); + mlx5dr_icm_free_chunk(pattern->chunk); + kfree(pattern->data); + kfree(pattern); +} + +struct mlx5dr_ptrn_obj * +mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr, + u16 num_of_actions, + u8 *data) +{ + struct mlx5dr_ptrn_obj *pattern; + u64 *hw_actions; + u8 action_id; + int i; + + mutex_lock(&mgr->modify_hdr_mutex); + pattern = dr_ptrn_find_cached_pattern(mgr, + num_of_actions, + (__be64 *)data); + if (!pattern) { + /* Alloc and add new pattern to cache */ + pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data); + if (!pattern) + goto out_unlock; + + hw_actions = (u64 *)pattern->data; + /* Here we mask the pattern data to create a valid pattern + * since we do an OR operation between the arg and pattern + */ + for (i = 0; i < num_of_actions; i++) { + action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id); + + if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET || + action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD || + action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE) + MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0); + } + + if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk, + num_of_actions, pattern->data)) { + refcount_dec(&pattern->refcount); + goto free_pattern; + } + } else { + refcount_inc(&pattern->refcount); + } + + mutex_unlock(&mgr->modify_hdr_mutex); + + return pattern; + +free_pattern: + dr_ptrn_free_pattern(pattern); +out_unlock: + mutex_unlock(&mgr->modify_hdr_mutex); + return NULL; +} + +void +mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr, + struct mlx5dr_ptrn_obj *pattern) +{ + mutex_lock(&mgr->modify_hdr_mutex); + + if (refcount_dec_and_test(&pattern->refcount)) + dr_ptrn_free_pattern(pattern); + + mutex_unlock(&mgr->modify_hdr_mutex); +} + struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn) { struct mlx5dr_ptrn_mgr *mgr; @@ -26,6 +212,7 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn) goto free_mgr; } + INIT_LIST_HEAD(&mgr->ptrn_list); return mgr; free_mgr: @@ -35,9 +222,20 @@ free_mgr: void 
mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr) { + struct mlx5dr_ptrn_obj *pattern; + struct mlx5dr_ptrn_obj *tmp; + if (!mgr) return; + WARN_ON(!list_empty(&mgr->ptrn_list)); + + list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) { + list_del(&pattern->list); + kfree(pattern->data); + kfree(pattern); + } + mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool); kfree(mgr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 00bb65613300..78756840d263 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -754,6 +754,26 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, return dr_postsend_icm_data(dmn, &send_info); } +int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn, + struct mlx5dr_icm_chunk *chunk, + u16 num_of_actions, + u8 *data) +{ + struct postsend_info send_info = {}; + int ret; + + send_info.write.addr = (uintptr_t)data; + send_info.write.length = num_of_actions * DR_MODIFY_ACTION_SIZE; + send_info.remote_addr = mlx5dr_icm_pool_get_chunk_mr_addr(chunk); + send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk); + + ret = dr_postsend_icm_data(dmn, &send_info); + if (ret) + return ret; + + return 0; +} + static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev, struct mlx5dr_qp *dr_qp, int port) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 7453fc6df494..097f1f389b76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -1002,6 +1002,15 @@ struct mlx5dr_ste_action_modify_field { u8 l4_type; }; +struct mlx5dr_ptrn_obj { + struct mlx5dr_icm_chunk *chunk; + u8 *data; + u16 num_of_actions; + u32 index; + refcount_t refcount; + struct list_head list; +}; + struct mlx5dr_action_rewrite { struct mlx5dr_domain *dmn; struct mlx5dr_icm_chunk *chunk; @@ -1011,6 +1020,7 @@ struct mlx5dr_action_rewrite { u8 allow_rx:1; u8 allow_tx:1; u8 modify_ttl:1; + struct mlx5dr_ptrn_obj *ptrn; }; struct mlx5dr_action_reformat { @@ -1448,6 +1458,10 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn, bool update_hw_ste); int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, struct mlx5dr_action *action); +int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn, + struct mlx5dr_icm_chunk *chunk, + u16 num_of_actions, + u8 *data); int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn); void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn); @@ -1537,5 +1551,9 @@ static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev) bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn); struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn); void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr); +struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr, + u16 num_of_actions, u8 *data); +void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr, + struct mlx5dr_ptrn_obj *pattern); #endif /* _DR_TYPES_H_ */ From 2533e726f4725b00a43b2d087480c7f2f414b56d Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 30 Aug 2022 01:21:05 +0300 Subject: [PATCH 03/15] net/mlx5: DR, Split chunk allocation to HW-dependent ways This way we are able to allocate chunk for modify_headers from 2 types: STEv0 that is allocated from the action area, 
and STEv1 that is allocating the chunks from the special area for patterns. Signed-off-by: Muhammad Sammar Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_action.c | 22 ++----- .../mellanox/mlx5/core/steering/dr_ste.c | 57 +++++++++++++++++++ .../mellanox/mlx5/core/steering/dr_ste.h | 2 + .../mellanox/mlx5/core/steering/dr_ste_v1.c | 29 ++++++++++ .../mellanox/mlx5/core/steering/dr_ste_v1.h | 2 + .../mellanox/mlx5/core/steering/dr_ste_v2.c | 2 + .../mellanox/mlx5/core/steering/dr_types.h | 2 + 7 files changed, 98 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 724e4d9e70ac..732a4002eab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1961,7 +1961,6 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, __be64 actions[], struct mlx5dr_action *action) { - struct mlx5dr_icm_chunk *chunk; u32 max_hw_actions; u32 num_hw_actions; u32 num_sw_actions; @@ -1978,15 +1977,9 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, return -EINVAL; } - chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_16); - if (!chunk) - return -ENOMEM; - hw_actions = kcalloc(1, max_hw_actions * DR_MODIFY_ACTION_SIZE, GFP_KERNEL); - if (!hw_actions) { - ret = -ENOMEM; - goto free_chunk; - } + if (!hw_actions) + return -ENOMEM; ret = dr_actions_convert_modify_header(action, max_hw_actions, @@ -1998,15 +1991,11 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, if (ret) goto free_hw_actions; - action->rewrite->chunk = chunk; action->rewrite->modify_ttl = modify_ttl; action->rewrite->data = (u8 *)hw_actions; action->rewrite->num_of_actions = num_hw_actions; - action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) - - dmn->info.caps.hdr_modify_icm_addr) / - DR_ACTION_CACHE_LINE_SIZE; - ret = mlx5dr_send_postsend_action(dmn, action); + ret = mlx5dr_ste_alloc_modify_hdr(action); if (ret) goto free_hw_actions; @@ -2014,8 +2003,6 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, free_hw_actions: kfree(hw_actions); -free_chunk: - mlx5dr_icm_free_chunk(chunk); return ret; } @@ -2171,8 +2158,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_MODIFY_HDR: - mlx5dr_icm_free_chunk(action->rewrite->chunk); - kfree(action->rewrite->data); + mlx5dr_ste_free_modify_hdr(action); refcount_dec(&action->rewrite->dmn->refcount); break; case DR_ACTION_TYP_SAMPLER: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 1e15f605df6e..9413aaf51251 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -633,6 +633,63 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, used_hw_action_num); } +static int +dr_ste_alloc_modify_hdr_chunk(struct mlx5dr_action *action) +{ + struct mlx5dr_domain *dmn = action->rewrite->dmn; + u32 chunk_size; + int ret; + + chunk_size = ilog2(roundup_pow_of_two(action->rewrite->num_of_actions)); + + /* HW modify action index granularity is at least 64B */ + chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8); + + action->rewrite->chunk = 
mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, + chunk_size); + if (!action->rewrite->chunk) + return -ENOMEM; + + action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(action->rewrite->chunk) - + dmn->info.caps.hdr_modify_icm_addr) / + DR_ACTION_CACHE_LINE_SIZE; + + ret = mlx5dr_send_postsend_action(action->rewrite->dmn, action); + if (ret) + goto free_chunk; + + return 0; + +free_chunk: + mlx5dr_icm_free_chunk(action->rewrite->chunk); + return -ENOMEM; +} + +static void dr_ste_free_modify_hdr_chunk(struct mlx5dr_action *action) +{ + mlx5dr_icm_free_chunk(action->rewrite->chunk); +} + +int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action) +{ + struct mlx5dr_domain *dmn = action->rewrite->dmn; + + if (mlx5dr_domain_is_support_ptrn_arg(dmn)) + return dmn->ste_ctx->alloc_modify_hdr_chunk(action); + + return dr_ste_alloc_modify_hdr_chunk(action); +} + +void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action) +{ + struct mlx5dr_domain *dmn = action->rewrite->dmn; + + if (mlx5dr_domain_is_support_ptrn_arg(dmn)) + return dmn->ste_ctx->dealloc_modify_hdr_chunk(action); + + return dr_ste_free_modify_hdr_chunk(action); +} + static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn, struct mlx5dr_match_spec *spec) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index 7075142bcfb6..54a6619c3ecb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -195,6 +195,8 @@ struct mlx5dr_ste_ctx { u8 *hw_action, u32 hw_action_sz, u16 *used_hw_action_num); + int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action); + void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action); /* Send */ void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 27cc6931bbde..cf8508139f55 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -2176,6 +2176,32 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag; } +int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action) +{ + struct mlx5dr_ptrn_mgr *ptrn_mgr; + + ptrn_mgr = action->rewrite->dmn->ptrn_mgr; + if (!ptrn_mgr) + return -EOPNOTSUPP; + + action->rewrite->ptrn = + mlx5dr_ptrn_cache_get_pattern(ptrn_mgr, + action->rewrite->num_of_actions, + action->rewrite->data); + if (!action->rewrite->ptrn) { + mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n"); + return -EAGAIN; + } + + return 0; +} + +void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action) +{ + mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr, + action->rewrite->ptrn); +} + static struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Builders */ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, @@ -2232,6 +2258,9 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = { .set_action_add = &dr_ste_v1_set_action_add, .set_action_copy = &dr_ste_v1_set_action_copy, .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, + .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg, + .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg, + /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h index b5c0f0f8392f..e2fc69867088 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h @@ -31,6 +31,8 @@ void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter, u8 dst_len, u8 src_hw_field, u8 src_shifter); int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action, u32 hw_action_sz, u16 *used_hw_action_num); +int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action); +void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action); void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask); void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c index cf1a3c9a1cf4..808b013cf48c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c @@ -221,6 +221,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = { .set_action_add = &dr_ste_v1_set_action_add, .set_action_copy = &dr_ste_v1_set_action_copy, .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, + .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg, + .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 097f1f389b76..a1c549fed9ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -335,6 +335,8 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, u8 *hw_action, u32 hw_action_sz, u16 *used_hw_action_num); +int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action); +void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action); const struct mlx5dr_ste_action_modify_field * mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field); From b7ba743a2f1c7d14f281c6d1a2877984008e28e9 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 30 Aug 2022 01:21:10 +0300 Subject: [PATCH 04/15] net/mlx5: DR, Check for modify_header_argument device capabilities Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 11 +++++++++++ .../ethernet/mellanox/mlx5/core/steering/dr_types.h | 3 +++ 2 files changed, 14 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 229f3684100c..e2cbc2b5bc27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -132,6 +132,17 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new); + caps->support_modify_argument = + MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT; + + if (caps->support_modify_argument) { + caps->log_header_modify_argument_granularity = + MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity); + 
caps->log_header_modify_argument_max_alloc = + MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc); + } + /* geneve_tlv_option_0_exist is the indication of * STE support for lookup type flex_parser_ok */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index a1c549fed9ca..9187e9d6ea54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -896,6 +896,9 @@ struct mlx5dr_cmd_caps { struct mlx5dr_vports vports; bool prio_tag_required; struct mlx5dr_roce_cap roce_caps; + u16 log_header_modify_argument_granularity; + u16 log_header_modify_argument_max_alloc; + bool support_modify_argument; u8 is_ecpf:1; u8 isolate_vl_tc:1; }; From de69696b6eeef3c0704fdb3bcd473ddb4757130e Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 30 Aug 2022 01:18:29 +0300 Subject: [PATCH 05/15] net/mlx5: DR, Add create/destroy for modify-header-argument general object Add functions for creation/destruction of the new type of general object. Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_cmd.c | 43 +++++++++++++++++++ .../mellanox/mlx5/core/steering/dr_types.h | 6 +++ 2 files changed, 49 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index e2cbc2b5bc27..3835ba3f4dda 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -693,6 +693,49 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, return 0; } +int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev, + u16 log_obj_range, u32 pd, + u32 *obj_id) +{ + u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, attr, opcode, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, attr, obj_type, + MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT); + MLX5_SET(general_obj_in_cmd_hdr, attr, + op_param.create.log_obj_range, log_obj_range); + + attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg); + MLX5_SET(modify_header_arg, attr, access_pd, pd); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + return 0; +} + +void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev, + u32 obj_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, + MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); + + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev, struct mlx5dr_cmd_fte_info *fte, bool *extended_dest) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 9187e9d6ea54..0075e2c7a441 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ 
-1357,6 +1357,12 @@ struct mlx5dr_cmd_gid_attr { int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, u16 index, struct mlx5dr_cmd_gid_attr *attr); +int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev, + u16 log_obj_range, u32 pd, + u32 *obj_id); +void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev, + u32 obj_id); + struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, enum mlx5dr_icm_type icm_type); void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool); From 4605fc0a2b652070f45b0a17967c2cb11a9e5a6e Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 8 Nov 2022 14:27:03 +0200 Subject: [PATCH 06/15] net/mlx5: DR, Add support for writing modify header argument The accelerated modify header arguments are written in the HW area with special WQE and specific data format. New function was added to support writing of new argument type. Note that GTA WQE is larger than READ and WRITE, so the queue management logic was updated to support this. Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_send.c | 167 +++++++++++++++--- .../mellanox/mlx5/core/steering/dr_types.h | 3 + 2 files changed, 150 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 78756840d263..d7c7363f9096 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -20,6 +20,7 @@ struct dr_data_seg { enum send_info_type { WRITE_ICM = 0, + GTA_ARG = 1, }; struct postsend_info { @@ -269,6 +270,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, dr_qp->rq.wqe_cnt = 4; dr_qp->sq.pc = 0; dr_qp->sq.cc = 0; + dr_qp->sq.head = 0; dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr); MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4); @@ -367,39 +369,113 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl) mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET); } -static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, - u32 rkey, struct dr_data_seg *data_seg, - u32 opcode, bool notify_hw) +static void +dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl, + u32 remote_addr, + struct dr_data_seg *data_seg, + int *size) +{ + struct mlx5_wqe_header_modify_argument_update_seg *wq_arg_seg; + struct mlx5_wqe_flow_update_ctrl_seg *wq_flow_seg; + + wq_ctrl->general_id = cpu_to_be32(remote_addr); + wq_flow_seg = (void *)(wq_ctrl + 1); + + /* mlx5_wqe_flow_update_ctrl_seg - all reserved */ + memset(wq_flow_seg, 0, sizeof(*wq_flow_seg)); + wq_arg_seg = (void *)(wq_flow_seg + 1); + + memcpy(wq_arg_seg->argument_list, + (void *)(uintptr_t)data_seg->addr, + data_seg->length); + + *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */ + sizeof(*wq_flow_seg) + /* WQE flow update ctrl seg - reserved */ + sizeof(*wq_arg_seg)) / /* WQE hdr modify arg seg - data */ + MLX5_SEND_WQE_DS; +} + +static void +dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl, + u64 remote_addr, + u32 rkey, + struct dr_data_seg *data_seg, + unsigned int *size) { struct mlx5_wqe_raddr_seg *wq_raddr; - struct mlx5_wqe_ctrl_seg *wq_ctrl; struct mlx5_wqe_data_seg *wq_dseg; - unsigned int size; - unsigned int idx; - size = sizeof(*wq_ctrl) / 16 + sizeof(*wq_dseg) / 16 + - sizeof(*wq_raddr) / 16; - - idx = dr_qp->sq.pc & 
(dr_qp->sq.wqe_cnt - 1); - - wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx); - wq_ctrl->imm = 0; - wq_ctrl->fm_ce_se = (data_seg->send_flags) ? - MLX5_WQE_CTRL_CQ_UPDATE : 0; - wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) | - opcode); - wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8); wq_raddr = (void *)(wq_ctrl + 1); + wq_raddr->raddr = cpu_to_be64(remote_addr); wq_raddr->rkey = cpu_to_be32(rkey); wq_raddr->reserved = 0; wq_dseg = (void *)(wq_raddr + 1); + wq_dseg->byte_count = cpu_to_be32(data_seg->length); wq_dseg->lkey = cpu_to_be32(data_seg->lkey); wq_dseg->addr = cpu_to_be64(data_seg->addr); - dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++; + *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */ + sizeof(*wq_dseg) + /* WQE data segment */ + sizeof(*wq_raddr)) / /* WQE remote addr segment */ + MLX5_SEND_WQE_DS; +} + +static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl, + struct dr_data_seg *data_seg) +{ + wq_ctrl->signature = 0; + wq_ctrl->rsvd[0] = 0; + wq_ctrl->rsvd[1] = 0; + wq_ctrl->fm_ce_se = data_seg->send_flags & IB_SEND_SIGNALED ? + MLX5_WQE_CTRL_CQ_UPDATE : 0; + wq_ctrl->imm = 0; +} + +static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, + u32 rkey, struct dr_data_seg *data_seg, + u32 opcode, bool notify_hw) +{ + struct mlx5_wqe_ctrl_seg *wq_ctrl; + int opcode_mod = 0; + unsigned int size; + unsigned int idx; + + idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1); + + wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx); + dr_set_ctrl_seg(wq_ctrl, data_seg); + + switch (opcode) { + case MLX5_OPCODE_RDMA_READ: + case MLX5_OPCODE_RDMA_WRITE: + dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr, + rkey, data_seg, &size); + break; + case MLX5_OPCODE_FLOW_TBL_ACCESS: + opcode_mod = MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT; + dr_rdma_handle_flow_access_arg_segments(wq_ctrl, remote_addr, + data_seg, &size); + break; + default: + WARN(true, "illegal opcode %d", opcode); + return; + } + + /* -------------------------------------------------------- + * |opcode_mod (8 bit)|wqe_index (16 bits)| opcod (8 bits)| + * -------------------------------------------------------- + */ + wq_ctrl->opmod_idx_opcode = + cpu_to_be32((opcode_mod << 24) | + ((dr_qp->sq.pc & 0xffff) << 8) | + opcode); + wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8); + + dr_qp->sq.pc += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); + dr_qp->sq.wqe_head[idx] = dr_qp->sq.head++; if (notify_hw) dr_cmd_notify_hw(dr_qp, wq_ctrl); @@ -412,7 +488,11 @@ static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_inf &send_info->write, MLX5_OPCODE_RDMA_WRITE, false); dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, &send_info->read, MLX5_OPCODE_RDMA_READ, true); + } else { /* GTA_ARG */ + dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, + &send_info->write, MLX5_OPCODE_FLOW_TBL_ACCESS, true); } + } /** @@ -478,11 +558,23 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn, } else if (ne == 1) { send_ring->pending_wqe -= send_ring->signal_th; } - } while (is_drain && send_ring->pending_wqe); + } while (ne == 1 || + (is_drain && send_ring->pending_wqe >= send_ring->signal_th)); return 0; } +static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring, + struct postsend_info *send_info) +{ + send_ring->pending_wqe++; + + if (send_ring->pending_wqe % send_ring->signal_th == 0) + send_info->write.send_flags |= IB_SEND_SIGNALED; + else + send_info->write.send_flags = 0; +} + static void 
dr_fill_write_icm_segs(struct mlx5dr_domain *dmn, struct mlx5dr_send_ring *send_ring, struct postsend_info *send_info) @@ -526,6 +618,8 @@ static void dr_fill_data_segs(struct mlx5dr_domain *dmn, { if (send_info->type == WRITE_ICM) dr_fill_write_icm_segs(dmn, send_ring, send_info); + else /* args */ + dr_fill_write_args_segs(send_ring, send_info); } static int dr_postsend_icm_data(struct mlx5dr_domain *dmn, @@ -774,6 +868,39 @@ int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn, return 0; } +int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id, + u16 num_of_actions, u8 *actions_data) +{ + int data_len, iter = 0, cur_sent; + u64 addr; + int ret; + + addr = (uintptr_t)actions_data; + data_len = num_of_actions * DR_MODIFY_ACTION_SIZE; + + do { + struct postsend_info send_info = {}; + + send_info.type = GTA_ARG; + send_info.write.addr = addr; + cur_sent = min_t(u32, data_len, DR_ACTION_CACHE_LINE_SIZE); + send_info.write.length = cur_sent; + send_info.write.lkey = 0; + send_info.remote_addr = arg_id + iter; + + ret = dr_postsend_icm_data(dmn, &send_info); + if (ret) + goto out; + + iter++; + addr += cur_sent; + data_len -= cur_sent; + } while (data_len > 0); + +out: + return ret; +} + static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev, struct mlx5dr_qp *dr_qp, int port) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 0075e2c7a441..7b35f78a84a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -1397,6 +1397,7 @@ struct mlx5dr_qp { struct mlx5_wq_ctrl wq_ctrl; u32 qpn; struct { + unsigned int head; unsigned int pc; unsigned int cc; unsigned int size; @@ -1473,6 +1474,8 @@ int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn, struct mlx5dr_icm_chunk *chunk, u16 num_of_actions, u8 *data); +int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id, + u16 num_of_actions, u8 *actions_data); int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn); void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn); From 7d7c9453d679fe55d72d63bacb3b639cd963ebc0 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 15 Nov 2022 00:33:25 +0200 Subject: [PATCH 07/15] net/mlx5: DR, Read ICM memory into dedicated buffer Instead of using the write buffer for reading we will use a dedicated buffer only for reading ICM memory. Due to the new support for args, we can have a case with pending_wc being odd number, and with reading into the same write buffer, it is possible to overwrite next write on the same slot. For example: pending_wc is 17 so the buffer for write is: | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | and we have requests as follows: r wr wr wr wr wr wr wr wr Now, the first read will be written into the last write because we use the same buffer for read and write, before it was written to the HW and we will have a wrong data in the ICM area. 
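The essence of the fix, paraphrased from the hunk below (a sketch, not the complete function):

	/* Before: read completions were directed into the shared write buffer,
	 * so a read could land on a slot whose write had not yet reached HW.
	 */
	send_info->read.addr = (uintptr_t)send_info->write.addr;
	send_info->read.lkey = send_ring->mr->mkey;

	/* After: reads always target a dedicated sync buffer (sized to
	 * max_post_send_size), so pending writes can no longer be overwritten.
	 */
	send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
	send_info->read.lkey = send_ring->sync_mr->mkey;
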
Signed-off-by: Erez Shitrit Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_send.c | 25 +++++++++++++------ .../mellanox/mlx5/core/steering/dr_types.h | 5 +--- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index d7c7363f9096..d052d469d4df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -602,9 +602,10 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn, send_ring->pending_wqe++; send_info->read.length = send_info->write.length; - /* Read into the same write area */ - send_info->read.addr = (uintptr_t)send_info->write.addr; - send_info->read.lkey = send_ring->mr->mkey; + + /* Read into dedicated sync buffer */ + send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr; + send_info->read.lkey = send_ring->sync_mr->mkey; if (send_ring->pending_wqe % send_ring->signal_th == 0) send_info->read.send_flags = IB_SEND_SIGNALED; @@ -1288,16 +1289,25 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) goto free_mem; } - dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev, - dmn->pdn, dmn->send_ring->sync_buff, - MIN_READ_SYNC); - if (!dmn->send_ring->sync_mr) { + dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size, + GFP_KERNEL); + if (!dmn->send_ring->sync_buff) { ret = -ENOMEM; goto clean_mr; } + dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev, + dmn->pdn, dmn->send_ring->sync_buff, + dmn->send_ring->max_post_send_size); + if (!dmn->send_ring->sync_mr) { + ret = -ENOMEM; + goto free_sync_mem; + } + return 0; +free_sync_mem: + kfree(dmn->send_ring->sync_buff); clean_mr: dr_dereg_mr(dmn->mdev, dmn->send_ring->mr); free_mem: @@ -1320,6 +1330,7 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn, dr_dereg_mr(dmn->mdev, send_ring->sync_mr); dr_dereg_mr(dmn->mdev, send_ring->mr); kfree(send_ring->buf); + kfree(send_ring->sync_buff); kfree(send_ring); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 7b35f78a84a2..81d7ac6d6258 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -1429,9 +1429,6 @@ struct mlx5dr_mr { size_t size; }; -#define MAX_SEND_CQE 64 -#define MIN_READ_SYNC 64 - struct mlx5dr_send_ring { struct mlx5dr_cq *cq; struct mlx5dr_qp *qp; @@ -1446,7 +1443,7 @@ struct mlx5dr_send_ring { u32 tx_head; void *buf; u32 buf_size; - u8 sync_buff[MIN_READ_SYNC]; + u8 *sync_buff; struct mlx5dr_mr *sync_mr; spinlock_t lock; /* Protect the data path of the send ring */ bool err_state; /* send_ring is not usable in err state */ From 17dc71c336aac381f59ba541cf85fb0c192d1c1c Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 15 Nov 2022 00:11:38 +0200 Subject: [PATCH 08/15] net/mlx5: DR, Fix QP continuous allocation When allocating a QP we allocate an RQ and an SQ, the RQ is stored first in memory and followed by the SQ. This allocation is not physically continiuos - it may span across different physical pages. SW Steering code always writes in pairs: 1BB write + 1BB read, or 2 continuous BBs of GTA WQE. This lead to an issue where RQ allocation was 4x16 which is equal to 1 WQE BB, causing 1 BB offset in the page and splitting the GTA WQE between different physical pages. 
The solution was to create the RQ with a even number of BBs and to have the RQ aligned to a page. Signed-off-by: Alex Vesker Signed-off-by: Yevgeny Kliteynik Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index d052d469d4df..4a5ae86e2b62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -267,7 +267,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, dr_qp->rq.pc = 0; dr_qp->rq.cc = 0; - dr_qp->rq.wqe_cnt = 4; + dr_qp->rq.wqe_cnt = 256; dr_qp->sq.pc = 0; dr_qp->sq.cc = 0; dr_qp->sq.head = 0; From 608d4f1769d80824d0b02eca73243d377468ee68 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Mon, 27 Mar 2023 23:26:55 +0300 Subject: [PATCH 09/15] net/mlx5: DR, Add modify header arg pool mechanism Added new mechanism for handling arguments for modify-header action. The new action "accelerated modify-header" asks for the arguments from separated area from the pattern, this area accessed via general objects. Handling of these object is done via the pool-manager struct. When the new header patterns are supported, while loading the domain, a few pools for argument creations will be created. The requests for allocating/deallocating arg objects are done via the pool manager API. Signed-off-by: Muhammad Sammar Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../mellanox/mlx5/core/steering/dr_arg.c | 273 ++++++++++++++++++ .../mellanox/mlx5/core/steering/dr_domain.c | 12 + .../mellanox/mlx5/core/steering/dr_types.h | 18 ++ 4 files changed, 304 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 39c2c8dc7e07..ca3c66cd47ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -113,7 +113,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o steering/dr_cmd.o steering/dr_fw.o \ steering/dr_action.o steering/fs_dr.o \ steering/dr_definer.o steering/dr_ptrn.o \ - steering/dr_dbg.o lib/smfs.o + steering/dr_arg.o steering/dr_dbg.o lib/smfs.o # # SF device # diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c new file mode 100644 index 000000000000..01ed6442095d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ +#include "dr_types.h" + +#define DR_ICM_MODIFY_HDR_GRANULARITY_4K 12 + +/* modify-header arg pool */ +enum dr_arg_chunk_size { + DR_ARG_CHUNK_SIZE_1, + DR_ARG_CHUNK_SIZE_MIN = DR_ARG_CHUNK_SIZE_1, /* keep updated when changing */ + DR_ARG_CHUNK_SIZE_2, + DR_ARG_CHUNK_SIZE_3, + DR_ARG_CHUNK_SIZE_4, + DR_ARG_CHUNK_SIZE_MAX, +}; + +/* argument pool area */ +struct dr_arg_pool { + enum dr_arg_chunk_size log_chunk_size; + struct mlx5dr_domain *dmn; + struct list_head free_list; + struct mutex mutex; /* protect arg pool */ +}; + +struct mlx5dr_arg_mgr { + struct mlx5dr_domain *dmn; + struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX]; +}; + +static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool) +{ + struct mlx5dr_arg_obj *arg_obj, *tmp_arg; + struct list_head cur_list; + u16 object_range; + int num_of_objects; + u32 obj_id = 0; + int i, ret; + + INIT_LIST_HEAD(&cur_list); + + object_range = + pool->dmn->info.caps.log_header_modify_argument_granularity; + + object_range = + max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity, + DR_ICM_MODIFY_HDR_GRANULARITY_4K); + object_range = + min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc, + object_range); + + if (pool->log_chunk_size > object_range) { + mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n", + pool->log_chunk_size); + return -ENOMEM; + } + + num_of_objects = (1 << (object_range - pool->log_chunk_size)); + /* Only one devx object per range */ + ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev, + object_range, + pool->dmn->pdn, + &obj_id); + if (ret) { + mlx5dr_err(pool->dmn, "failed allocating object with range: %d:\n", + object_range); + return -EAGAIN; + } + + for (i = 0; i < num_of_objects; i++) { + arg_obj = kzalloc(sizeof(*arg_obj), GFP_KERNEL); + if (!arg_obj) { + ret = -ENOMEM; + goto clean_arg_obj; + } + + arg_obj->log_chunk_size = pool->log_chunk_size; + + list_add_tail(&arg_obj->list_node, &cur_list); + + arg_obj->obj_id = obj_id; + arg_obj->obj_offset = i * (1 << pool->log_chunk_size); + } + list_splice_tail_init(&cur_list, &pool->free_list); + + return 0; + +clean_arg_obj: + mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, obj_id); + list_for_each_entry_safe(arg_obj, tmp_arg, &cur_list, list_node) { + list_del(&arg_obj->list_node); + kfree(arg_obj); + } + return ret; +} + +static struct mlx5dr_arg_obj *dr_arg_pool_get_arg_obj(struct dr_arg_pool *pool) +{ + struct mlx5dr_arg_obj *arg_obj = NULL; + int ret; + + mutex_lock(&pool->mutex); + if (list_empty(&pool->free_list)) { + ret = dr_arg_pool_alloc_objs(pool); + if (ret) + goto out; + } + + arg_obj = list_first_entry_or_null(&pool->free_list, + struct mlx5dr_arg_obj, + list_node); + WARN(!arg_obj, "couldn't get dr arg obj from pool"); + + if (arg_obj) + list_del_init(&arg_obj->list_node); + +out: + mutex_unlock(&pool->mutex); + return arg_obj; +} + +static void dr_arg_pool_put_arg_obj(struct dr_arg_pool *pool, + struct mlx5dr_arg_obj *arg_obj) +{ + mutex_lock(&pool->mutex); + list_add(&arg_obj->list_node, &pool->free_list); + mutex_unlock(&pool->mutex); +} + +static struct dr_arg_pool *dr_arg_pool_create(struct mlx5dr_domain *dmn, + enum dr_arg_chunk_size chunk_size) +{ + struct dr_arg_pool *pool; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return NULL; + + pool->dmn = dmn; + + INIT_LIST_HEAD(&pool->free_list); + mutex_init(&pool->mutex); + + pool->log_chunk_size = chunk_size; + if (dr_arg_pool_alloc_objs(pool)) + goto free_pool; + + return pool; + +free_pool: + kfree(pool); + + return 
NULL; +} + +static void dr_arg_pool_destroy(struct dr_arg_pool *pool) +{ + struct mlx5dr_arg_obj *arg_obj, *tmp_arg; + + list_for_each_entry_safe(arg_obj, tmp_arg, &pool->free_list, list_node) { + list_del(&arg_obj->list_node); + if (!arg_obj->obj_offset) /* the first in range */ + mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, arg_obj->obj_id); + kfree(arg_obj); + } + + mutex_destroy(&pool->mutex); + kfree(pool); +} + +static enum dr_arg_chunk_size dr_arg_get_chunk_size(u16 num_of_actions) +{ + if (num_of_actions <= 8) + return DR_ARG_CHUNK_SIZE_1; + if (num_of_actions <= 16) + return DR_ARG_CHUNK_SIZE_2; + if (num_of_actions <= 32) + return DR_ARG_CHUNK_SIZE_3; + if (num_of_actions <= 64) + return DR_ARG_CHUNK_SIZE_4; + + return DR_ARG_CHUNK_SIZE_MAX; +} + +u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj) +{ + return (arg_obj->obj_id + arg_obj->obj_offset); +} + +struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr, + u16 num_of_actions, + u8 *data) +{ + u32 size = dr_arg_get_chunk_size(num_of_actions); + struct mlx5dr_arg_obj *arg_obj; + int ret; + + if (size >= DR_ARG_CHUNK_SIZE_MAX) + return NULL; + + arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]); + if (!arg_obj) { + mlx5dr_err(mgr->dmn, "Failed allocating args object for modify header\n"); + return NULL; + } + + /* write it into the hw */ + ret = mlx5dr_send_postsend_args(mgr->dmn, + mlx5dr_arg_get_obj_id(arg_obj), + num_of_actions, data); + if (ret) { + mlx5dr_err(mgr->dmn, "Failed writing args object\n"); + goto put_obj; + } + + return arg_obj; + +put_obj: + mlx5dr_arg_put_obj(mgr, arg_obj); + return NULL; +} + +void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr, + struct mlx5dr_arg_obj *arg_obj) +{ + dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj); +} + +struct mlx5dr_arg_mgr* +mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_arg_mgr *pool_mgr; + int i; + + if (!mlx5dr_domain_is_support_ptrn_arg(dmn)) + return NULL; + + pool_mgr = kzalloc(sizeof(*pool_mgr), GFP_KERNEL); + if (!pool_mgr) + return NULL; + + pool_mgr->dmn = dmn; + + for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++) { + pool_mgr->pools[i] = dr_arg_pool_create(dmn, i); + if (!pool_mgr->pools[i]) + goto clean_pools; + } + + return pool_mgr; + +clean_pools: + for (i--; i >= 0; i--) + dr_arg_pool_destroy(pool_mgr->pools[i]); + + kfree(pool_mgr); + return NULL; +} + +void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr) +{ + struct dr_arg_pool **pools; + int i; + + if (!mgr) + return; + + pools = mgr->pools; + for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++) + dr_arg_pool_destroy(pools[i]); + + kfree(mgr); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 7a0381572c4c..c4545daf179b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -26,7 +26,18 @@ static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn) return -ENOMEM; } + /* create argument pool */ + dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn); + if (!dmn->arg_mgr) { + mlx5dr_err(dmn, "Couldn't create arg_mgr\n"); + goto free_modify_header_pattern; + } + return 0; + +free_modify_header_pattern: + mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr); + return -ENOMEM; } static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn) @@ -34,6 +45,7 @@ static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn) if 
(!mlx5dr_domain_is_support_ptrn_arg(dmn)) return; + mlx5dr_arg_mgr_destroy(dmn->arg_mgr); mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 81d7ac6d6258..e102ceb20e01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -28,6 +28,8 @@ #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg) struct mlx5dr_ptrn_mgr; +struct mlx5dr_arg_mgr; +struct mlx5dr_arg_obj; static inline bool dr_is_flex_parser_0_id(u8 parser_id) { @@ -941,6 +943,7 @@ struct mlx5dr_domain { struct kmem_cache *chunks_kmem_cache; struct kmem_cache *htbls_kmem_cache; struct mlx5dr_ptrn_mgr *ptrn_mgr; + struct mlx5dr_arg_mgr *arg_mgr; struct mlx5dr_send_ring *send_ring; struct mlx5dr_domain_info info; struct xarray csum_fts_xa; @@ -1016,6 +1019,13 @@ struct mlx5dr_ptrn_obj { struct list_head list; }; +struct mlx5dr_arg_obj { + u32 obj_id; + u32 obj_offset; + struct list_head list_node; + u32 log_chunk_size; +}; + struct mlx5dr_action_rewrite { struct mlx5dr_domain *dmn; struct mlx5dr_icm_chunk *chunk; @@ -1566,5 +1576,13 @@ struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mg u16 num_of_actions, u8 *data); void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr, struct mlx5dr_ptrn_obj *pattern); +struct mlx5dr_arg_mgr *mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn); +void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr); +struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr, + u16 num_of_actions, + u8 *data); +void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr, + struct mlx5dr_arg_obj *arg_obj); +u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj); #endif /* _DR_TYPES_H_ */ From 0caebadda57b7f1740406b054ddff36cedf0bf39 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Mon, 7 Nov 2022 02:49:22 +0200 Subject: [PATCH 10/15] net/mlx5: DR, Add modify header argument pointer to actions attributes While building the actions, add the pointer of the arguments for accelerated modify list action into the action's attributes. This will be used later on while building the specific STE for this action. 
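In short, when both a pattern and an argument object are attached to the rewrite action, the STE attributes carry the argument object ID and the pattern index instead of the legacy rewrite index (condensed from the modify-header hunk below; the decap-L3 case is handled the same way):

	if (action->rewrite->ptrn && action->rewrite->arg) {
		/* accelerated path: argument object ID + pattern index */
		attr.modify_index = mlx5dr_arg_get_obj_id(action->rewrite->arg);
		attr.modify_actions = action->rewrite->ptrn->num_of_actions;
		attr.modify_pat_idx = action->rewrite->ptrn->index;
	} else {
		/* legacy path: action list written directly to modify-header ICM */
		attr.modify_index = action->rewrite->index;
		attr.modify_actions = action->rewrite->num_of_actions;
		attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
	}
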
Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_action.c | 26 ++++++++++++++----- .../mellanox/mlx5/core/steering/dr_ste_v1.c | 19 +++++++++++++- .../mellanox/mlx5/core/steering/dr_types.h | 4 +++ 3 files changed, 42 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 732a4002eab5..98e3d4f572eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -819,14 +819,28 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, case DR_ACTION_TYP_TNL_L2_TO_L2: break; case DR_ACTION_TYP_TNL_L3_TO_L2: - attr.decap_index = action->rewrite->index; - attr.decap_actions = action->rewrite->num_of_actions; - attr.decap_with_vlan = - attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS; + if (action->rewrite->ptrn && action->rewrite->arg) { + attr.decap_index = mlx5dr_arg_get_obj_id(action->rewrite->arg); + attr.decap_actions = action->rewrite->ptrn->num_of_actions; + attr.decap_pat_idx = action->rewrite->ptrn->index; + } else { + attr.decap_index = action->rewrite->index; + attr.decap_actions = action->rewrite->num_of_actions; + attr.decap_with_vlan = + attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS; + attr.decap_pat_idx = MLX5DR_INVALID_PATTERN_INDEX; + } break; case DR_ACTION_TYP_MODIFY_HDR: - attr.modify_index = action->rewrite->index; - attr.modify_actions = action->rewrite->num_of_actions; + if (action->rewrite->ptrn && action->rewrite->arg) { + attr.modify_index = mlx5dr_arg_get_obj_id(action->rewrite->arg); + attr.modify_actions = action->rewrite->ptrn->num_of_actions; + attr.modify_pat_idx = action->rewrite->ptrn->index; + } else { + attr.modify_index = action->rewrite->index; + attr.modify_actions = action->rewrite->num_of_actions; + attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX; + } if (action->rewrite->modify_ttl) dr_action_modify_ttl_adjust(dmn, &attr, rx_rule, &recalc_cs_required); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index cf8508139f55..3d04ac08be77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -2179,27 +2179,44 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action) { struct mlx5dr_ptrn_mgr *ptrn_mgr; + int ret; ptrn_mgr = action->rewrite->dmn->ptrn_mgr; if (!ptrn_mgr) return -EOPNOTSUPP; + action->rewrite->arg = mlx5dr_arg_get_obj(action->rewrite->dmn->arg_mgr, + action->rewrite->num_of_actions, + action->rewrite->data); + if (!action->rewrite->arg) { + mlx5dr_err(action->rewrite->dmn, "Failed allocating args for modify header\n"); + return -EAGAIN; + } + action->rewrite->ptrn = mlx5dr_ptrn_cache_get_pattern(ptrn_mgr, action->rewrite->num_of_actions, action->rewrite->data); if (!action->rewrite->ptrn) { mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n"); - return -EAGAIN; + ret = -EAGAIN; + goto put_arg; } return 0; + +put_arg: + mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr, + action->rewrite->arg); + return ret; } void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action) { mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr, action->rewrite->ptrn); + 
mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr, + action->rewrite->arg); } static struct mlx5dr_ste_ctx ste_ctx_v1 = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index e102ceb20e01..3ffda3d302e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -261,11 +261,14 @@ u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste); struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste); #define MLX5DR_MAX_VLANS 2 +#define MLX5DR_INVALID_PATTERN_INDEX 0xffffffff struct mlx5dr_ste_actions_attr { u32 modify_index; + u32 modify_pat_idx; u16 modify_actions; u32 decap_index; + u32 decap_pat_idx; u16 decap_actions; u8 decap_with_vlan:1; u64 final_icm_addr; @@ -1036,6 +1039,7 @@ struct mlx5dr_action_rewrite { u8 allow_tx:1; u8 modify_ttl:1; struct mlx5dr_ptrn_obj *ptrn; + struct mlx5dr_arg_obj *arg; }; struct mlx5dr_action_reformat { From 62e40c8568251c65026712a90a9a486dc5d06d56 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 8 Nov 2022 11:56:11 +0200 Subject: [PATCH 11/15] net/mlx5: DR, Apply new accelerated modify action and decapl3 If there is support for pattern/args, use the new accelerated modify header action for modify header and decap L3 actions. Otherwise fall back to the old modify-header implementation. Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_ste_v1.c | 51 +++++++++++++++++-- .../mlx5/core/steering/mlx5_ifc_dr_ste_v1.h | 2 +- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 3d04ac08be77..d2d312454564 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -495,21 +495,59 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p, - u8 *s_action, - u16 num_of_actions, - u32 re_write_index) +static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p, + u8 *d_action, + u16 num_of_actions, + u32 rewrite_pattern, + u32 rewrite_args) +{ + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST); + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + modify_actions_pattern_pointer, rewrite_pattern); + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + number_of_modify_actions, num_of_actions); + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + modify_actions_argument_pointer, rewrite_args); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_basic_rewrite_actions(u8 *hw_ste_p, + u8 *s_action, + u16 num_of_actions, + u32 rewrite_index) { MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id, DR_STE_V1_ACTION_ID_MODIFY_LIST); MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions, num_of_actions); MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr, - re_write_index); + rewrite_index); dr_ste_v1_set_reparse(hw_ste_p); } +static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p, + u8 *action, + u16 num_of_actions, + u32 rewrite_pattern, + u32 rewrite_args) +{ + if (rewrite_pattern != 
MLX5DR_INVALID_PATTERN_INDEX) + return dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p, + action, + num_of_actions, + rewrite_pattern, + rewrite_args); + + /* fall back to the code that doesn't support accelerated modify header */ + return dr_ste_v1_set_basic_rewrite_actions(hw_ste_p, + action, + num_of_actions, + rewrite_args); +} + static void dr_ste_v1_set_aso_flow_meter(u8 *d_action, u32 object_id, u32 offset, @@ -614,6 +652,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, } dr_ste_v1_set_rewrite_actions(last_ste, action, attr->modify_actions, + attr->modify_pat_idx, attr->modify_index); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; @@ -744,6 +783,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { dr_ste_v1_set_rewrite_actions(last_ste, action, attr->decap_actions, + attr->decap_pat_idx, attr->decap_index); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; @@ -799,6 +839,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, } dr_ste_v1_set_rewrite_actions(last_ste, action, attr->modify_actions, + attr->modify_pat_idx, attr->modify_index); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h index 790a17d6207f..ca3b0f1453a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h @@ -100,7 +100,7 @@ struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits { u8 pointer[0x20]; }; -struct mlx5_ifc_ste_double_action_modify_action_list_v1_bits { +struct mlx5_ifc_ste_double_action_accelerated_modify_action_list_v1_bits { u8 action_id[0x8]; u8 modify_actions_pattern_pointer[0x18]; From 947e258537ea43098d778b3315bde4fe1a1a10a3 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 15 Nov 2022 02:36:07 +0200 Subject: [PATCH 12/15] net/mlx5: DR, Support decap L3 action using pattern / arg mechanism Use the new accelerated action for decap L3 on RX side: use the mechanism of pattern and argument same as in modify-header action. 
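The selection this relies on is the same one added earlier in the series for modify-header: when both a cached pattern and an argument object exist, the STE attributes carry the argument object id and the pattern index; otherwise they carry the legacy ICM-based rewrite index with the pattern index set to MLX5DR_INVALID_PATTERN_INDEX. Below is a minimal standalone C sketch of that selection for the decap case; the struct layouts, field names and sample values are simplified stand-ins for illustration, not the driver's real types.

#include <stdint.h>
#include <stdio.h>

#define INVALID_PATTERN_INDEX 0xffffffff	/* stand-in for MLX5DR_INVALID_PATTERN_INDEX */

/* Simplified stand-ins for the pattern, argument and rewrite objects. */
struct ptrn_obj { uint32_t index; uint16_t num_of_actions; };
struct arg_obj  { uint32_t obj_id; };

struct rewrite_info {
	struct ptrn_obj *ptrn;	/* cached pattern, NULL when unsupported */
	struct arg_obj *arg;	/* per-rule argument, NULL when unsupported */
	uint32_t index;		/* legacy ICM-based rewrite index */
	uint16_t num_of_actions;
};

struct decap_attr { uint32_t index; uint32_t pat_idx; uint16_t actions; };

/* Prefer the accelerated pattern/argument pair when both objects exist,
 * otherwise fall back to the legacy rewrite index and mark the pattern
 * index invalid so the STE builder picks the basic action layout.
 */
static void fill_decap_attr(const struct rewrite_info *rw, struct decap_attr *attr)
{
	if (rw->ptrn && rw->arg) {
		attr->index = rw->arg->obj_id;
		attr->actions = rw->ptrn->num_of_actions;
		attr->pat_idx = rw->ptrn->index;
	} else {
		attr->index = rw->index;
		attr->actions = rw->num_of_actions;
		attr->pat_idx = INVALID_PATTERN_INDEX;
	}
}

int main(void)
{
	struct ptrn_obj ptrn = { .index = 3, .num_of_actions = 2 };
	struct arg_obj arg = { .obj_id = 42 };
	struct rewrite_info accel = { .ptrn = &ptrn, .arg = &arg, .num_of_actions = 2 };
	struct rewrite_info legacy = { .index = 17, .num_of_actions = 2 };
	struct decap_attr attr;

	fill_decap_attr(&accel, &attr);
	printf("accelerated: pat_idx=0x%x index=%u\n", (unsigned)attr.pat_idx, (unsigned)attr.index);

	fill_decap_attr(&legacy, &attr);
	printf("legacy:      pat_idx=0x%x index=%u\n", (unsigned)attr.pat_idx, (unsigned)attr.index);
	return 0;
}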
Signed-off-by: Erez Shitrit Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_action.c | 21 +++++-------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 98e3d4f572eb..8f8f0a0b38fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1428,23 +1428,12 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, return ret; } - action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, - DR_CHUNK_SIZE_8); - if (!action->rewrite->chunk) { - mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n"); - return -ENOMEM; - } + action->rewrite->data = hw_actions; + action->rewrite->dmn = dmn; - action->rewrite->data = (void *)hw_actions; - action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr - (action->rewrite->chunk) - - dmn->info.caps.hdr_modify_icm_addr) / - DR_ACTION_CACHE_LINE_SIZE; - - ret = mlx5dr_send_postsend_action(dmn, action); + ret = mlx5dr_ste_alloc_modify_hdr(action); if (ret) { - mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n"); - mlx5dr_icm_free_chunk(action->rewrite->chunk); + mlx5dr_dbg(dmn, "Failed preparing reformat data\n"); return ret; } return 0; @@ -2161,7 +2150,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_TNL_L3_TO_L2: - mlx5dr_icm_free_chunk(action->rewrite->chunk); + mlx5dr_ste_free_modify_hdr(action); refcount_dec(&action->rewrite->dmn->refcount); break; case DR_ACTION_TYP_L2_TO_TNL_L2: From 40ff097f2503869fe5d96f796cf84ad4b17eaa9b Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 15 Nov 2022 18:19:39 +0200 Subject: [PATCH 13/15] net/mlx5: DR, Modify header action of size 1 optimization Set modify header action of size 1 directly on the STE for supporting devices, thus reducing number of hops and cache misses. 
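As a rough model of the decision introduced here, with a plain boolean standing in for the real sw_format_ver capability check: when the modify-header rewrite collapses to a single hardware action on a device that understands the accelerated list format, the action data stays inline and no pattern/argument objects are allocated. The types and helper below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-action rewrite bookkeeping. */
struct rewrite_info {
	uint16_t num_of_actions;
	bool single_action_opt;	/* single action written directly into the STE */
};

/* Only one hardware action and a device generation that supports the
 * accelerated modify-header format allow the inline shortcut.
 */
static bool use_inline_single_action(uint16_t num_hw_actions, bool accel_format_supported)
{
	return num_hw_actions == 1 && accel_format_supported;
}

int main(void)
{
	struct rewrite_info rw = { .num_of_actions = 1 };

	rw.single_action_opt = use_inline_single_action(rw.num_of_actions, true);
	printf("inline single action: %s\n", rw.single_action_opt ? "yes" : "no");
	return 0;
}

When the flag is set, the STE builder receives the raw action data and copies it into the action slot instead of programming a pattern/argument pair, which is the memcpy branch visible in the dr_ste_v1.c hunk below.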
Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_action.c | 37 ++++++++++++------ .../mellanox/mlx5/core/steering/dr_ste_v1.c | 38 ++++++++++++------- .../mellanox/mlx5/core/steering/dr_types.h | 2 + 3 files changed, 52 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 8f8f0a0b38fd..0eb9a8d7f282 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -832,14 +832,20 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, } break; case DR_ACTION_TYP_MODIFY_HDR: - if (action->rewrite->ptrn && action->rewrite->arg) { - attr.modify_index = mlx5dr_arg_get_obj_id(action->rewrite->arg); - attr.modify_actions = action->rewrite->ptrn->num_of_actions; - attr.modify_pat_idx = action->rewrite->ptrn->index; - } else { - attr.modify_index = action->rewrite->index; + if (action->rewrite->single_action_opt) { attr.modify_actions = action->rewrite->num_of_actions; - attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX; + attr.single_modify_action = action->rewrite->data; + } else { + if (action->rewrite->ptrn && action->rewrite->arg) { + attr.modify_index = + mlx5dr_arg_get_obj_id(action->rewrite->arg); + attr.modify_actions = action->rewrite->ptrn->num_of_actions; + attr.modify_pat_idx = action->rewrite->ptrn->index; + } else { + attr.modify_index = action->rewrite->index; + attr.modify_actions = action->rewrite->num_of_actions; + attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX; + } } if (action->rewrite->modify_ttl) dr_action_modify_ttl_adjust(dmn, &attr, rx_rule, @@ -1998,9 +2004,15 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, action->rewrite->data = (u8 *)hw_actions; action->rewrite->num_of_actions = num_hw_actions; - ret = mlx5dr_ste_alloc_modify_hdr(action); - if (ret) - goto free_hw_actions; + if (num_hw_actions == 1 && + dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) { + action->rewrite->single_action_opt = true; + } else { + action->rewrite->single_action_opt = false; + ret = mlx5dr_ste_alloc_modify_hdr(action); + if (ret) + goto free_hw_actions; + } return 0; @@ -2151,6 +2163,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) break; case DR_ACTION_TYP_TNL_L3_TO_L2: mlx5dr_ste_free_modify_hdr(action); + kfree(action->rewrite->data); refcount_dec(&action->rewrite->dmn->refcount); break; case DR_ACTION_TYP_L2_TO_TNL_L2: @@ -2161,7 +2174,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_MODIFY_HDR: - mlx5dr_ste_free_modify_hdr(action); + if (!action->rewrite->single_action_opt) + mlx5dr_ste_free_modify_hdr(action); + kfree(action->rewrite->data); refcount_dec(&action->rewrite->dmn->refcount); break; case DR_ACTION_TYP_SAMPLER: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index d2d312454564..4c0704ad166b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -499,16 +499,21 @@ static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p, u8 *d_action, u16 num_of_actions, u32 rewrite_pattern, - u32 rewrite_args) + u32 rewrite_args, + u8 *action_data) { - 
MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, - action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST); - MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, - modify_actions_pattern_pointer, rewrite_pattern); - MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, - number_of_modify_actions, num_of_actions); - MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, - modify_actions_argument_pointer, rewrite_args); + if (action_data) { + memcpy(d_action, action_data, DR_MODIFY_ACTION_SIZE); + } else { + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST); + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + modify_actions_pattern_pointer, rewrite_pattern); + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + number_of_modify_actions, num_of_actions); + MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action, + modify_actions_argument_pointer, rewrite_args); + } dr_ste_v1_set_reparse(hw_ste_p); } @@ -532,14 +537,16 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p, u8 *action, u16 num_of_actions, u32 rewrite_pattern, - u32 rewrite_args) + u32 rewrite_args, + u8 *action_data) { if (rewrite_pattern != MLX5DR_INVALID_PATTERN_INDEX) return dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p, action, num_of_actions, rewrite_pattern, - rewrite_args); + rewrite_args, + action_data); /* fall back to the code that doesn't support accelerated modify header */ return dr_ste_v1_set_basic_rewrite_actions(hw_ste_p, @@ -653,7 +660,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_rewrite_actions(last_ste, action, attr->modify_actions, attr->modify_pat_idx, - attr->modify_index); + attr->modify_index, + attr->single_modify_action); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_encap = false; @@ -784,7 +792,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v1_set_rewrite_actions(last_ste, action, attr->decap_actions, attr->decap_pat_idx, - attr->decap_index); + attr->decap_index, + NULL); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_modify_hdr = false; @@ -840,7 +849,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v1_set_rewrite_actions(last_ste, action, attr->modify_actions, attr->modify_pat_idx, - attr->modify_index); + attr->modify_index, + attr->single_modify_action); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 3ffda3d302e0..37b7b1a79f93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -267,6 +267,7 @@ struct mlx5dr_ste_actions_attr { u32 modify_index; u32 modify_pat_idx; u16 modify_actions; + u8 *single_modify_action; u32 decap_index; u32 decap_pat_idx; u16 decap_actions; @@ -1035,6 +1036,7 @@ struct mlx5dr_action_rewrite { u8 *data; u16 num_of_actions; u32 index; + u8 single_action_opt:1; u8 allow_rx:1; u8 allow_tx:1; u8 modify_ttl:1; From a21e52bb8f372b98855d931ca87e43e3c958c04f Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Sun, 1 Jan 2023 00:20:53 +0200 Subject: [PATCH 14/15] net/mlx5: DR, Add support for the pattern/arg parameters in debug dump Support the pattern/args-based MODIFY_HDR and 
TNL_L3_TO_L2 actions in dbg dump Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_dbg.c | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c index db81d881d38e..1ff8bde90e1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c @@ -140,10 +140,31 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id, action->flow_tag->flow_tag); break; case DR_ACTION_TYP_MODIFY_HDR: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + { + struct mlx5dr_ptrn_obj *ptrn = action->rewrite->ptrn; + struct mlx5dr_arg_obj *arg = action->rewrite->arg; + u8 *rewrite_data = action->rewrite->data; + bool ptrn_arg; + int i; + + ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg; + + seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x", DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id, - rule_id, action->rewrite->index); + rule_id, action->rewrite->index, + action->rewrite->single_action_opt, + action->rewrite->num_of_actions, + ptrn_arg ? ptrn->index : 0, + ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0); + + for (i = 0; i < action->rewrite->num_of_actions; i++) { + seq_printf(file, ",0x%016llx", + be64_to_cpu(((__be64 *)rewrite_data)[i])); + } + + seq_puts(file, "\n"); break; + } case DR_ACTION_TYP_VPORT: seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id, @@ -157,7 +178,10 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id, case DR_ACTION_TYP_TNL_L3_TO_L2: seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id, - rule_id, action->rewrite->index); + rule_id, + (action->rewrite->ptrn && action->rewrite->arg) ? + mlx5dr_arg_get_obj_id(action->rewrite->arg) : + action->rewrite->index); break; case DR_ACTION_TYP_L2_TO_TNL_L2: seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", From 220ae987838c893fe11e46a3e3994a549f203daa Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 30 Aug 2022 01:18:59 +0300 Subject: [PATCH 15/15] net/mlx5: DR, Enable patterns and arguments for supporting devices Check if patterns and arguments for modify header action are supported and enable them accordingly. Signed-off-by: Muhammad Sammar Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index c4545daf179b..9a2dfe6ebe31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -12,7 +12,8 @@ bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn) { - return false; + return dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX && + dmn->info.caps.support_modify_argument; } static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
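For reference, a standalone sketch of the capability gate that this final patch turns on; the enum values and field names below are placeholders picked for the example, not the real mlx5 definitions.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder steering-format generations for the example. */
enum steering_format {
	FORMAT_CONNECTX_5 = 0,
	FORMAT_CONNECTX_6DX = 1,
};

struct caps {
	enum steering_format sw_format_ver;
	bool support_modify_argument;
};

/* Pattern/argument support needs both the STEv1 steering format and the
 * modify-argument capability reported by firmware.
 */
static bool is_ptrn_arg_supported(const struct caps *caps)
{
	return caps->sw_format_ver >= FORMAT_CONNECTX_6DX &&
	       caps->support_modify_argument;
}

int main(void)
{
	struct caps cx5 = { FORMAT_CONNECTX_5, false };
	struct caps cx6dx = { FORMAT_CONNECTX_6DX, true };

	printf("CX5: %d, CX6DX: %d\n",
	       is_ptrn_arg_supported(&cx5), is_ptrn_arg_supported(&cx6dx));
	return 0;
}

Only when both conditions hold does the domain set up the pattern and argument managers; otherwise rules keep taking the existing ICM-based modify-header path.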