diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 39c2c8dc7e07..ca3c66cd47ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -113,7 +113,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
 					steering/dr_cmd.o steering/dr_fw.o \
 					steering/dr_action.o steering/fs_dr.o \
 					steering/dr_definer.o steering/dr_ptrn.o \
-					steering/dr_dbg.o lib/smfs.o
+					steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
 #
 # SF device
 #
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index ee104cf04392..0eb9a8d7f282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -819,14 +819,34 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 		case DR_ACTION_TYP_TNL_L2_TO_L2:
 			break;
 		case DR_ACTION_TYP_TNL_L3_TO_L2:
-			attr.decap_index = action->rewrite->index;
-			attr.decap_actions = action->rewrite->num_of_actions;
-			attr.decap_with_vlan =
-				attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
+			if (action->rewrite->ptrn && action->rewrite->arg) {
+				attr.decap_index = mlx5dr_arg_get_obj_id(action->rewrite->arg);
+				attr.decap_actions = action->rewrite->ptrn->num_of_actions;
+				attr.decap_pat_idx = action->rewrite->ptrn->index;
+			} else {
+				attr.decap_index = action->rewrite->index;
+				attr.decap_actions = action->rewrite->num_of_actions;
+				attr.decap_with_vlan =
+					attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
+				attr.decap_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
+			}
 			break;
 		case DR_ACTION_TYP_MODIFY_HDR:
-			attr.modify_index = action->rewrite->index;
-			attr.modify_actions = action->rewrite->num_of_actions;
+			if (action->rewrite->single_action_opt) {
+				attr.modify_actions = action->rewrite->num_of_actions;
+				attr.single_modify_action = action->rewrite->data;
+			} else {
+				if (action->rewrite->ptrn && action->rewrite->arg) {
+					attr.modify_index =
+						mlx5dr_arg_get_obj_id(action->rewrite->arg);
+					attr.modify_actions = action->rewrite->ptrn->num_of_actions;
+					attr.modify_pat_idx = action->rewrite->ptrn->index;
+				} else {
+					attr.modify_index = action->rewrite->index;
+					attr.modify_actions = action->rewrite->num_of_actions;
+					attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
+				}
+			}
 			if (action->rewrite->modify_ttl)
 				dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
 							    &recalc_cs_required);
@@ -1365,8 +1385,6 @@ out_err:
 	return -EINVAL;
 }
 
-#define ACTION_CACHE_LINE_SIZE 64
-
 static int
 dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
 				 u8 reformat_param_0, u8 reformat_param_1,
@@ -1403,36 +1421,31 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
 	}
 	case DR_ACTION_TYP_TNL_L3_TO_L2:
 	{
-		u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
+		u8 *hw_actions;
 		int ret;
 
+		hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL);
+		if (!hw_actions)
+			return -ENOMEM;
+
 		ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
 							  data, data_sz,
 							  hw_actions,
-							  ACTION_CACHE_LINE_SIZE,
+							  DR_ACTION_CACHE_LINE_SIZE,
 							  &action->rewrite->num_of_actions);
 		if (ret) {
 			mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
+			kfree(hw_actions);
 			return ret;
 		}
 
-		action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
-								DR_CHUNK_SIZE_8);
-		if (!action->rewrite->chunk) {
-			mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
-			return -ENOMEM;
-		}
+		action->rewrite->data = hw_actions;
+		action->rewrite->dmn = dmn;
 
-		action->rewrite->data = (void *)hw_actions;
-		action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr
-					  (action->rewrite->chunk) -
-					  dmn->info.caps.hdr_modify_icm_addr) /
-					  ACTION_CACHE_LINE_SIZE;
-
-		ret = mlx5dr_send_postsend_action(dmn, action);
+		ret = mlx5dr_ste_alloc_modify_hdr(action);
 		if (ret) {
-			mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
-			mlx5dr_icm_free_chunk(action->rewrite->chunk);
+			mlx5dr_dbg(dmn, "Failed preparing reformat data\n");
+			kfree(hw_actions);
 			return ret;
 		}
 		return 0;
@@ -1963,7 +1976,6 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
 					  __be64 actions[],
 					  struct mlx5dr_action *action)
 {
-	struct mlx5dr_icm_chunk *chunk;
 	u32 max_hw_actions;
 	u32 num_hw_actions;
 	u32 num_sw_actions;
@@ -1980,15 +1992,9 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
 		return -EINVAL;
 	}
 
-	chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_16);
-	if (!chunk)
-		return -ENOMEM;
-
 	hw_actions = kcalloc(1, max_hw_actions * DR_MODIFY_ACTION_SIZE, GFP_KERNEL);
-	if (!hw_actions) {
-		ret = -ENOMEM;
-		goto free_chunk;
-	}
+	if (!hw_actions)
+		return -ENOMEM;
 
 	ret = dr_actions_convert_modify_header(action,
 					       max_hw_actions,
@@ -2000,24 +2006,24 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
 	if (ret)
 		goto free_hw_actions;
 
-	action->rewrite->chunk = chunk;
 	action->rewrite->modify_ttl = modify_ttl;
 	action->rewrite->data = (u8 *)hw_actions;
 	action->rewrite->num_of_actions = num_hw_actions;
-	action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
-				  dmn->info.caps.hdr_modify_icm_addr) /
-				  ACTION_CACHE_LINE_SIZE;
 
-	ret = mlx5dr_send_postsend_action(dmn, action);
-	if (ret)
-		goto free_hw_actions;
+	if (num_hw_actions == 1 &&
+	    dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) {
+		action->rewrite->single_action_opt = true;
+	} else {
+		action->rewrite->single_action_opt = false;
+		ret = mlx5dr_ste_alloc_modify_hdr(action);
+		if (ret)
+			goto free_hw_actions;
+	}
 
 	return 0;
 
 free_hw_actions:
 	kfree(hw_actions);
-free_chunk:
-	mlx5dr_icm_free_chunk(chunk);
 	return ret;
 }
@@ -2162,7 +2168,8 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
 		refcount_dec(&action->reformat->dmn->refcount);
 		break;
 	case DR_ACTION_TYP_TNL_L3_TO_L2:
-		mlx5dr_icm_free_chunk(action->rewrite->chunk);
+		mlx5dr_ste_free_modify_hdr(action);
+		kfree(action->rewrite->data);
 		refcount_dec(&action->rewrite->dmn->refcount);
 		break;
 	case DR_ACTION_TYP_L2_TO_TNL_L2:
@@ -2173,7 +2180,8 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
 		refcount_dec(&action->reformat->dmn->refcount);
 		break;
 	case DR_ACTION_TYP_MODIFY_HDR:
-		mlx5dr_icm_free_chunk(action->rewrite->chunk);
+		if (!action->rewrite->single_action_opt)
+			mlx5dr_ste_free_modify_hdr(action);
 		kfree(action->rewrite->data);
 		refcount_dec(&action->rewrite->dmn->refcount);
 		break;
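For orientation before the new files: the rewritten DR_ACTION_TYP_MODIFY_HDR handling above chooses between three execution modes. The standalone sketch below summarizes that dispatch; the types and names (pick_rewrite_mode, REWRITE_*) are simplified stand-ins invented for illustration, not driver code.

#include <stdio.h>

enum rewrite_mode { REWRITE_SINGLE_INLINE, REWRITE_PTRN_ARG, REWRITE_LEGACY_ICM };

static enum rewrite_mode pick_rewrite_mode(int single_action_opt,
					   int has_ptrn, int has_arg)
{
	if (single_action_opt)		/* one action on ConnectX-6DX+: inlined in the STE */
		return REWRITE_SINGLE_INLINE;
	if (has_ptrn && has_arg)	/* accelerated: pattern index + argument object */
		return REWRITE_PTRN_ARG;
	return REWRITE_LEGACY_ICM;	/* ICM-resident action list, as before */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_rewrite_mode(1, 0, 0),	/* 0: single inline */
	       pick_rewrite_mode(0, 1, 1),	/* 1: pattern/arg */
	       pick_rewrite_mode(0, 0, 0));	/* 2: legacy */
	return 0;
}

On ConnectX-6DX and newer, a single modify action is copied directly into the STE; otherwise the driver prefers a shared pattern plus a per-rule argument object, and only falls back to the legacy ICM action list when pattern/argument objects are unavailable.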
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
new file mode 100644
index 000000000000..01ed6442095d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+
+#define DR_ICM_MODIFY_HDR_GRANULARITY_4K 12
+
+/* modify-header arg pool */
+enum dr_arg_chunk_size {
+	DR_ARG_CHUNK_SIZE_1,
+	DR_ARG_CHUNK_SIZE_MIN = DR_ARG_CHUNK_SIZE_1, /* keep updated when changing */
+	DR_ARG_CHUNK_SIZE_2,
+	DR_ARG_CHUNK_SIZE_3,
+	DR_ARG_CHUNK_SIZE_4,
+	DR_ARG_CHUNK_SIZE_MAX,
+};
+
+/* argument pool area */
+struct dr_arg_pool {
+	enum dr_arg_chunk_size log_chunk_size;
+	struct mlx5dr_domain *dmn;
+	struct list_head free_list;
+	struct mutex mutex; /* protect arg pool */
+};
+
+struct mlx5dr_arg_mgr {
+	struct mlx5dr_domain *dmn;
+	struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
+};
+
+static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)
+{
+	struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
+	struct list_head cur_list;
+	u16 object_range;
+	int num_of_objects;
+	u32 obj_id = 0;
+	int i, ret;
+
+	INIT_LIST_HEAD(&cur_list);
+
+	object_range =
+		max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity,
+		      DR_ICM_MODIFY_HDR_GRANULARITY_4K);
+	object_range =
+		min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc,
+		      object_range);
+
+	if (pool->log_chunk_size > object_range) {
+		mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",
+			   pool->log_chunk_size);
+		return -ENOMEM;
+	}
+
+	num_of_objects = (1 << (object_range - pool->log_chunk_size));
+	/* Only one devx object per range */
+	ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev,
+						  object_range,
+						  pool->dmn->pdn,
+						  &obj_id);
+	if (ret) {
+		mlx5dr_err(pool->dmn, "failed allocating object with range: %d:\n",
+			   object_range);
+		return -EAGAIN;
+	}
+
+	for (i = 0; i < num_of_objects; i++) {
+		arg_obj = kzalloc(sizeof(*arg_obj), GFP_KERNEL);
+		if (!arg_obj) {
+			ret = -ENOMEM;
+			goto clean_arg_obj;
+		}
+
+		arg_obj->log_chunk_size = pool->log_chunk_size;
+
+		list_add_tail(&arg_obj->list_node, &cur_list);
+
+		arg_obj->obj_id = obj_id;
+		arg_obj->obj_offset = i * (1 << pool->log_chunk_size);
+	}
+	list_splice_tail_init(&cur_list, &pool->free_list);
+
+	return 0;
+
+clean_arg_obj:
+	mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, obj_id);
+	list_for_each_entry_safe(arg_obj, tmp_arg, &cur_list, list_node) {
+		list_del(&arg_obj->list_node);
+		kfree(arg_obj);
+	}
+	return ret;
+}
+
+static struct mlx5dr_arg_obj *dr_arg_pool_get_arg_obj(struct dr_arg_pool *pool)
+{
+	struct mlx5dr_arg_obj *arg_obj = NULL;
+	int ret;
+
+	mutex_lock(&pool->mutex);
+	if (list_empty(&pool->free_list)) {
+		ret = dr_arg_pool_alloc_objs(pool);
+		if (ret)
+			goto out;
+	}
+
+	arg_obj = list_first_entry_or_null(&pool->free_list,
+					   struct mlx5dr_arg_obj,
+					   list_node);
+	WARN(!arg_obj, "couldn't get dr arg obj from pool");
+
+	if (arg_obj)
+		list_del_init(&arg_obj->list_node);
+
+out:
+	mutex_unlock(&pool->mutex);
+	return arg_obj;
+}
+
+static void dr_arg_pool_put_arg_obj(struct dr_arg_pool *pool,
+				    struct mlx5dr_arg_obj *arg_obj)
+{
+	mutex_lock(&pool->mutex);
+	list_add(&arg_obj->list_node, &pool->free_list);
+	mutex_unlock(&pool->mutex);
+}
+
+static struct dr_arg_pool *dr_arg_pool_create(struct mlx5dr_domain *dmn,
+					      enum dr_arg_chunk_size chunk_size)
+{
+	struct dr_arg_pool *pool;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return NULL;
+
+	pool->dmn = dmn;
+
+	INIT_LIST_HEAD(&pool->free_list);
+	mutex_init(&pool->mutex);
+
+	pool->log_chunk_size = chunk_size;
+	if (dr_arg_pool_alloc_objs(pool))
+		goto free_pool;
+
+	return pool;
+
+free_pool:
+	kfree(pool);
+
+	return NULL;
+}
+
+static void dr_arg_pool_destroy(struct dr_arg_pool *pool)
+{
+	struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
+
+	list_for_each_entry_safe(arg_obj, tmp_arg, &pool->free_list, list_node) {
+		list_del(&arg_obj->list_node);
+		if (!arg_obj->obj_offset) /* the first in range */
+			mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, arg_obj->obj_id);
+		kfree(arg_obj);
+	}
+
+	mutex_destroy(&pool->mutex);
+	kfree(pool);
+}
+
+static enum dr_arg_chunk_size dr_arg_get_chunk_size(u16 num_of_actions)
+{
+	if (num_of_actions <= 8)
+		return DR_ARG_CHUNK_SIZE_1;
+	if (num_of_actions <= 16)
+		return DR_ARG_CHUNK_SIZE_2;
+	if (num_of_actions <= 32)
+		return DR_ARG_CHUNK_SIZE_3;
+	if (num_of_actions <= 64)
+		return DR_ARG_CHUNK_SIZE_4;
+
+	return DR_ARG_CHUNK_SIZE_MAX;
+}
+
+u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj)
+{
+	return (arg_obj->obj_id + arg_obj->obj_offset);
+}
+
+struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
+					  u16 num_of_actions,
+					  u8 *data)
+{
+	u32 size = dr_arg_get_chunk_size(num_of_actions);
+	struct mlx5dr_arg_obj *arg_obj;
+	int ret;
+
+	if (size >= DR_ARG_CHUNK_SIZE_MAX)
+		return NULL;
+
+	arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
+	if (!arg_obj) {
+		mlx5dr_err(mgr->dmn, "Failed allocating args object for modify header\n");
+		return NULL;
+	}
+
+	/* write it into the hw */
+	ret = mlx5dr_send_postsend_args(mgr->dmn,
+					mlx5dr_arg_get_obj_id(arg_obj),
+					num_of_actions, data);
+	if (ret) {
+		mlx5dr_err(mgr->dmn, "Failed writing args object\n");
+		goto put_obj;
+	}
+
+	return arg_obj;
+
+put_obj:
+	mlx5dr_arg_put_obj(mgr, arg_obj);
+	return NULL;
+}
+
+void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
+			struct mlx5dr_arg_obj *arg_obj)
+{
+	dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
+}
+
+struct mlx5dr_arg_mgr*
+mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn)
+{
+	struct mlx5dr_arg_mgr *pool_mgr;
+	int i;
+
+	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+		return NULL;
+
+	pool_mgr = kzalloc(sizeof(*pool_mgr), GFP_KERNEL);
+	if (!pool_mgr)
+		return NULL;
+
+	pool_mgr->dmn = dmn;
+
+	for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++) {
+		pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
+		if (!pool_mgr->pools[i])
+			goto clean_pools;
+	}
+
+	return pool_mgr;
+
+clean_pools:
+	for (i--; i >= 0; i--)
+		dr_arg_pool_destroy(pool_mgr->pools[i]);
+
+	kfree(pool_mgr);
+	return NULL;
+}
+
+void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr)
+{
+	struct dr_arg_pool **pools;
+	int i;
+
+	if (!mgr)
+		return;
+
+	pools = mgr->pools;
+	for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++)
+		dr_arg_pool_destroy(pools[i]);
+
+	kfree(mgr);
+}
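A small runnable demonstration of the pool arithmetic in dr_arg_pool_alloc_objs() above, using assumed capability values (the granularity and object id below are illustrative, not queried from hardware): one device object covering 2^object_range argument slots is carved into 2^(object_range - log_chunk_size) chunks, and each chunk's id is obj_id + obj_offset, exactly as mlx5dr_arg_get_obj_id() computes.

#include <stdio.h>

int main(void)
{
	/* Assumed example capabilities: log granularity 12 -> one devx
	 * object spans 2^12 argument slots, each slot holding 64B
	 * (= 8 modify-header actions). */
	unsigned int log_granularity = 12;
	unsigned int log_chunk_size = 2;	/* like DR_ARG_CHUNK_SIZE_3: 2^2 slots */
	unsigned int obj_id = 0x1000;		/* hypothetical FW-returned id */
	unsigned int num_of_objects = 1u << (log_granularity - log_chunk_size);
	unsigned int i;

	printf("one devx object is carved into %u chunks\n", num_of_objects);
	for (i = 0; i < 4; i++)			/* first few carved chunks */
		printf("chunk %u -> argument id 0x%x\n",
		       i, obj_id + (i << log_chunk_size));
	return 0;
}

This is why dr_arg_pool_destroy() only destroys the device object once, on the entry whose obj_offset is zero: all other free-list entries are just offsets into the same object.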
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 229f3684100c..3835ba3f4dda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -132,6 +132,17 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 
 	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
 
+	caps->support_modify_argument =
+		MLX5_CAP_GEN_64(mdev, general_obj_types) &
+		MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT;
+
+	if (caps->support_modify_argument) {
+		caps->log_header_modify_argument_granularity =
+			MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity);
+		caps->log_header_modify_argument_max_alloc =
+			MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc);
+	}
+
 	/* geneve_tlv_option_0_exist is the indication of
 	 * STE support for lookup type flex_parser_ok
 	 */
@@ -682,6 +693,49 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
 	return 0;
 }
 
+int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
+					u16 log_obj_range, u32 pd,
+					u32 *obj_id)
+{
+	u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+	void *attr;
+	int ret;
+
+	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr, attr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, attr, obj_type,
+		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+	MLX5_SET(general_obj_in_cmd_hdr, attr,
+		 op_param.create.log_obj_range, log_obj_range);
+
+	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+	MLX5_SET(modify_header_arg, attr, access_pd, pd);
+
+	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		return ret;
+
+	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return 0;
+}
+
+void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
+					  u32 obj_id)
+{
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+
+	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
 static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
 					struct mlx5dr_cmd_fte_info *fte,
 					bool *extended_dest)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index db81d881d38e..1ff8bde90e1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -140,10 +140,31 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
 			   action->flow_tag->flow_tag);
 		break;
 	case DR_ACTION_TYP_MODIFY_HDR:
-		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+	{
+		struct mlx5dr_ptrn_obj *ptrn = action->rewrite->ptrn;
+		struct mlx5dr_arg_obj *arg = action->rewrite->arg;
+		u8 *rewrite_data = action->rewrite->data;
+		bool ptrn_arg;
+		int i;
+
+		ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
+
+		seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
 			   DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
-			   rule_id, action->rewrite->index);
+			   rule_id, action->rewrite->index,
+			   action->rewrite->single_action_opt,
+			   action->rewrite->num_of_actions,
+			   ptrn_arg ? ptrn->index : 0,
+			   ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+
+		for (i = 0; i < action->rewrite->num_of_actions; i++) {
+			seq_printf(file, ",0x%016llx",
+				   be64_to_cpu(((__be64 *)rewrite_data)[i]));
+		}
+
+		seq_puts(file, "\n");
 		break;
+	}
 	case DR_ACTION_TYP_VPORT:
 		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
 			   DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
@@ -157,7 +178,10 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
 	case DR_ACTION_TYP_TNL_L3_TO_L2:
 		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
 			   DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
-			   rule_id, action->rewrite->index);
+			   rule_id,
+			   (action->rewrite->ptrn && action->rewrite->arg) ?
+			   mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+			   action->rewrite->index);
 		break;
 	case DR_ACTION_TYP_L2_TO_TNL_L2:
 		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 7a0381572c4c..9a2dfe6ebe31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -12,7 +12,8 @@
 
 bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
 {
-	return false;
+	return dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX &&
+	       dmn->info.caps.support_modify_argument;
 }
 
 static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
@@ -26,7 +27,18 @@ static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
 		return -ENOMEM;
 	}
 
+	/* create argument pool */
+	dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn);
+	if (!dmn->arg_mgr) {
+		mlx5dr_err(dmn, "Couldn't create arg_mgr\n");
+		goto free_modify_header_pattern;
+	}
+
 	return 0;
+
+free_modify_header_pattern:
+	mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
+	return -ENOMEM;
 }
 
 static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
@@ -34,6 +46,7 @@ static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
 	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
 		return;
 
+	mlx5dr_arg_mgr_destroy(dmn->arg_mgr);
 	mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
 }
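The extended dump record above is a single CSV line: the fixed fields, then one 0x%016llx column per modify action. A sketch of what such a line looks like, with made-up ids and a hypothetical record-type value (the real DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR constant is defined elsewhere in dr_dbg):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t actions[2] = { 0x2e5f700000000000ULL, 0x2e60100000000000ULL };
	int rec_type = 3413;		/* hypothetical record-type value */
	unsigned int i;

	/* rec_type, action_id, rule_id, index, single_action_opt,
	 * num_of_actions, pattern index, argument object id */
	printf("%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
	       rec_type, 0x1ULL, 0x2ULL, 0x0u, 0, 0x2u, 0x10u, 0x1000u);
	for (i = 0; i < 2; i++)		/* one column per modify action */
		printf(",0x%016llx", (unsigned long long)actions[i]);
	printf("\n");
	return 0;
}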
#include "dr_types.h" +#include "mlx5_ifc_dr_ste_v1.h" + +enum dr_ptrn_modify_hdr_action_id { + DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00, + DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05, + DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06, + DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07, + DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a, +}; struct mlx5dr_ptrn_mgr { struct mlx5dr_domain *dmn; struct mlx5dr_icm_pool *ptrn_icm_pool; + /* cache for modify_header ptrn */ + struct list_head ptrn_list; + struct mutex modify_hdr_mutex; /* protect the pattern cache */ }; +/* Cache structure and functions */ +static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions, + __be64 cur_hw_actions[], + size_t num_of_actions, + __be64 hw_actions[]) +{ + int i; + + if (cur_num_of_actions != num_of_actions) + return false; + + for (i = 0; i < num_of_actions; i++) { + u8 action_id = + MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id); + + if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) { + if (hw_actions[i] != cur_hw_actions[i]) + return false; + } else { + if ((__force __be32)hw_actions[i] != + (__force __be32)cur_hw_actions[i]) + return false; + } + } + + return true; +} + +static struct mlx5dr_ptrn_obj * +dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr, + size_t num_of_actions, + __be64 hw_actions[]) +{ + struct mlx5dr_ptrn_obj *cached_pattern; + struct mlx5dr_ptrn_obj *tmp; + + list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) { + if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions, + (__be64 *)cached_pattern->data, + num_of_actions, + hw_actions)) { + /* Put this pattern in the head of the list, + * as we will probably use it more. + */ + list_del_init(&cached_pattern->list); + list_add(&cached_pattern->list, &mgr->ptrn_list); + return cached_pattern; + } + } + + return NULL; +} + +static struct mlx5dr_ptrn_obj * +dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr, + u16 num_of_actions, u8 *data) +{ + struct mlx5dr_ptrn_obj *pattern; + struct mlx5dr_icm_chunk *chunk; + u32 chunk_size; + u32 index; + + chunk_size = ilog2(num_of_actions); + /* HW modify action index granularity is at least 64B */ + chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8); + + chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size); + if (!chunk) + return NULL; + + index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) - + mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) / + DR_ACTION_CACHE_LINE_SIZE; + + pattern = kzalloc(sizeof(*pattern), GFP_KERNEL); + if (!pattern) + goto free_chunk; + + pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE * + sizeof(*pattern->data), GFP_KERNEL); + if (!pattern->data) + goto free_pattern; + + memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE); + pattern->chunk = chunk; + pattern->index = index; + pattern->num_of_actions = num_of_actions; + + list_add(&pattern->list, &mgr->ptrn_list); + refcount_set(&pattern->refcount, 1); + + return pattern; + +free_pattern: + kfree(pattern); +free_chunk: + mlx5dr_icm_free_chunk(chunk); + return NULL; +} + +static void +dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern) +{ + list_del(&pattern->list); + mlx5dr_icm_free_chunk(pattern->chunk); + kfree(pattern->data); + kfree(pattern); +} + +struct mlx5dr_ptrn_obj * +mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr, + u16 num_of_actions, + u8 *data) +{ + struct mlx5dr_ptrn_obj *pattern; + u64 *hw_actions; + u8 action_id; + int i; + + mutex_lock(&mgr->modify_hdr_mutex); + pattern = dr_ptrn_find_cached_pattern(mgr, + num_of_actions, 
+ (__be64 *)data); + if (!pattern) { + /* Alloc and add new pattern to cache */ + pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data); + if (!pattern) + goto out_unlock; + + hw_actions = (u64 *)pattern->data; + /* Here we mask the pattern data to create a valid pattern + * since we do an OR operation between the arg and pattern + */ + for (i = 0; i < num_of_actions; i++) { + action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id); + + if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET || + action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD || + action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE) + MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0); + } + + if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk, + num_of_actions, pattern->data)) { + refcount_dec(&pattern->refcount); + goto free_pattern; + } + } else { + refcount_inc(&pattern->refcount); + } + + mutex_unlock(&mgr->modify_hdr_mutex); + + return pattern; + +free_pattern: + dr_ptrn_free_pattern(pattern); +out_unlock: + mutex_unlock(&mgr->modify_hdr_mutex); + return NULL; +} + +void +mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr, + struct mlx5dr_ptrn_obj *pattern) +{ + mutex_lock(&mgr->modify_hdr_mutex); + + if (refcount_dec_and_test(&pattern->refcount)) + dr_ptrn_free_pattern(pattern); + + mutex_unlock(&mgr->modify_hdr_mutex); +} + struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn) { struct mlx5dr_ptrn_mgr *mgr; @@ -26,6 +212,7 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn) goto free_mgr; } + INIT_LIST_HEAD(&mgr->ptrn_list); return mgr; free_mgr: @@ -35,9 +222,20 @@ free_mgr: void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr) { + struct mlx5dr_ptrn_obj *pattern; + struct mlx5dr_ptrn_obj *tmp; + if (!mgr) return; + WARN_ON(!list_empty(&mgr->ptrn_list)); + + list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) { + list_del(&pattern->list); + kfree(pattern->data); + kfree(pattern); + } + mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool); kfree(mgr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 00bb65613300..4a5ae86e2b62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -20,6 +20,7 @@ struct dr_data_seg { enum send_info_type { WRITE_ICM = 0, + GTA_ARG = 1, }; struct postsend_info { @@ -266,9 +267,10 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, dr_qp->rq.pc = 0; dr_qp->rq.cc = 0; - dr_qp->rq.wqe_cnt = 4; + dr_qp->rq.wqe_cnt = 256; dr_qp->sq.pc = 0; dr_qp->sq.cc = 0; + dr_qp->sq.head = 0; dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr); MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4); @@ -367,39 +369,113 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl) mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET); } -static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, - u32 rkey, struct dr_data_seg *data_seg, - u32 opcode, bool notify_hw) +static void +dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl, + u32 remote_addr, + struct dr_data_seg *data_seg, + int *size) +{ + struct mlx5_wqe_header_modify_argument_update_seg *wq_arg_seg; + struct mlx5_wqe_flow_update_ctrl_seg *wq_flow_seg; + + wq_ctrl->general_id = cpu_to_be32(remote_addr); + wq_flow_seg = (void *)(wq_ctrl + 1); + + /* mlx5_wqe_flow_update_ctrl_seg - all reserved */ 
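Why the cache zeroes inline_data before storing, and why COPY is compared in full: the hardware ORs the pattern with the per-rule argument, so two rewrites that differ only in their immediates can share one pattern. The model below is deliberately simplified (the ctrl/inline split and the top-byte action id are assumptions for illustration, not the exact STEv1 bit layout):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Treat each 8-byte modify action as a control dword (action id, field,
 * shifts) plus an inline-data dword.  SET/ADD/INSERT_INLINE carry their
 * immediate in inline_data, which comes from the argument at run time;
 * COPY has no immediate, so it must match exactly. */
struct hw_action { uint32_t ctrl; uint32_t inline_data; };

#define ACTION_ID(a)	((a).ctrl >> 24)	/* assumed: id in the top byte */
#define ID_COPY		0x05
#define ID_SET		0x06

static bool same_pattern(struct hw_action a, struct hw_action b)
{
	if (ACTION_ID(a) == ID_COPY)
		return a.ctrl == b.ctrl && a.inline_data == b.inline_data;
	return a.ctrl == b.ctrl;	/* immediates come from the argument */
}

int main(void)
{
	struct hw_action set_ttl_64 = { (ID_SET << 24) | 0x1234, 64 };
	struct hw_action set_ttl_10 = { (ID_SET << 24) | 0x1234, 10 };

	/* Same pattern, different argument data -> one cached pattern. */
	printf("shareable: %s\n", same_pattern(set_ttl_64, set_ttl_10) ? "yes" : "no");
	return 0;
}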
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 00bb65613300..4a5ae86e2b62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -20,6 +20,7 @@ struct dr_data_seg {
 
 enum send_info_type {
 	WRITE_ICM = 0,
+	GTA_ARG = 1,
 };
 
 struct postsend_info {
@@ -266,9 +267,10 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	dr_qp->rq.pc = 0;
 	dr_qp->rq.cc = 0;
-	dr_qp->rq.wqe_cnt = 4;
+	dr_qp->rq.wqe_cnt = 256;
 	dr_qp->sq.pc = 0;
 	dr_qp->sq.cc = 0;
+	dr_qp->sq.head = 0;
 	dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
 
 	MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
@@ -367,39 +369,113 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
 	mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
 }
 
-static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
-			     u32 rkey, struct dr_data_seg *data_seg,
-			     u32 opcode, bool notify_hw)
+static void
+dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+					u32 remote_addr,
+					struct dr_data_seg *data_seg,
+					int *size)
+{
+	struct mlx5_wqe_header_modify_argument_update_seg *wq_arg_seg;
+	struct mlx5_wqe_flow_update_ctrl_seg *wq_flow_seg;
+
+	wq_ctrl->general_id = cpu_to_be32(remote_addr);
+	wq_flow_seg = (void *)(wq_ctrl + 1);
+
+	/* mlx5_wqe_flow_update_ctrl_seg - all reserved */
+	memset(wq_flow_seg, 0, sizeof(*wq_flow_seg));
+	wq_arg_seg = (void *)(wq_flow_seg + 1);
+
+	memcpy(wq_arg_seg->argument_list,
+	       (void *)(uintptr_t)data_seg->addr,
+	       data_seg->length);
+
+	*size = (sizeof(*wq_ctrl) +	/* WQE ctrl segment */
+		 sizeof(*wq_flow_seg) +	/* WQE flow update ctrl seg - reserved */
+		 sizeof(*wq_arg_seg)) /	/* WQE hdr modify arg seg - data */
+		MLX5_SEND_WQE_DS;
+}
+
+static void
+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+				  u64 remote_addr,
+				  u32 rkey,
+				  struct dr_data_seg *data_seg,
+				  unsigned int *size)
 {
 	struct mlx5_wqe_raddr_seg *wq_raddr;
-	struct mlx5_wqe_ctrl_seg *wq_ctrl;
 	struct mlx5_wqe_data_seg *wq_dseg;
-	unsigned int size;
-	unsigned int idx;
-
-	size = sizeof(*wq_ctrl) / 16 + sizeof(*wq_dseg) / 16 +
-		sizeof(*wq_raddr) / 16;
-
-	idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
-
-	wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
-	wq_ctrl->imm = 0;
-	wq_ctrl->fm_ce_se = (data_seg->send_flags) ?
-		MLX5_WQE_CTRL_CQ_UPDATE : 0;
-	wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
-						opcode);
-	wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
 
 	wq_raddr = (void *)(wq_ctrl + 1);
+
 	wq_raddr->raddr = cpu_to_be64(remote_addr);
 	wq_raddr->rkey = cpu_to_be32(rkey);
 	wq_raddr->reserved = 0;
 
 	wq_dseg = (void *)(wq_raddr + 1);
+
 	wq_dseg->byte_count = cpu_to_be32(data_seg->length);
 	wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
 	wq_dseg->addr = cpu_to_be64(data_seg->addr);
 
-	dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
+	*size = (sizeof(*wq_ctrl) +	/* WQE ctrl segment */
+		 sizeof(*wq_dseg) +	/* WQE data segment */
+		 sizeof(*wq_raddr)) /	/* WQE remote addr segment */
+		MLX5_SEND_WQE_DS;
+}
+
+static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+			    struct dr_data_seg *data_seg)
+{
+	wq_ctrl->signature = 0;
+	wq_ctrl->rsvd[0] = 0;
+	wq_ctrl->rsvd[1] = 0;
+	wq_ctrl->fm_ce_se = data_seg->send_flags & IB_SEND_SIGNALED ?
+			    MLX5_WQE_CTRL_CQ_UPDATE : 0;
+	wq_ctrl->imm = 0;
+}
+
+static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+			     u32 rkey, struct dr_data_seg *data_seg,
+			     u32 opcode, bool notify_hw)
+{
+	struct mlx5_wqe_ctrl_seg *wq_ctrl;
+	int opcode_mod = 0;
+	unsigned int size;
+	unsigned int idx;
+
+	idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
+
+	wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+	dr_set_ctrl_seg(wq_ctrl, data_seg);
+
+	switch (opcode) {
+	case MLX5_OPCODE_RDMA_READ:
+	case MLX5_OPCODE_RDMA_WRITE:
+		dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
+						  rkey, data_seg, &size);
+		break;
+	case MLX5_OPCODE_FLOW_TBL_ACCESS:
+		opcode_mod = MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT;
+		dr_rdma_handle_flow_access_arg_segments(wq_ctrl, remote_addr,
+							data_seg, &size);
+		break;
+	default:
+		WARN(true, "illegal opcode %d", opcode);
+		return;
+	}
+
+	/* ---------------------------------------------------------
+	 * |opcode_mod (8 bits)|wqe_index (16 bits)|opcode (8 bits)|
+	 * ---------------------------------------------------------
+	 */
+	wq_ctrl->opmod_idx_opcode =
+		cpu_to_be32((opcode_mod << 24) |
+			    ((dr_qp->sq.pc & 0xffff) << 8) |
+			    opcode);
+	wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
+
+	dr_qp->sq.pc += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+	dr_qp->sq.wqe_head[idx] = dr_qp->sq.head++;
 
 	if (notify_hw)
 		dr_cmd_notify_hw(dr_qp, wq_ctrl);
@@ -412,7 +488,11 @@ static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_inf
 				 &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
 		dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
 				 &send_info->read, MLX5_OPCODE_RDMA_READ, true);
+	} else { /* GTA_ARG */
+		dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+				 &send_info->write, MLX5_OPCODE_FLOW_TBL_ACCESS, true);
 	}
+
 }
 
 /**
@@ -478,11 +558,23 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
 		} else if (ne == 1) {
 			send_ring->pending_wqe -= send_ring->signal_th;
 		}
-	} while (is_drain && send_ring->pending_wqe);
+	} while (ne == 1 ||
+		 (is_drain && send_ring->pending_wqe >= send_ring->signal_th));
 
 	return 0;
 }
 
+static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
+				    struct postsend_info *send_info)
+{
+	send_ring->pending_wqe++;
+
+	if (send_ring->pending_wqe % send_ring->signal_th == 0)
+		send_info->write.send_flags |= IB_SEND_SIGNALED;
+	else
+		send_info->write.send_flags = 0;
+}
+
 static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
 				   struct mlx5dr_send_ring *send_ring,
 				   struct postsend_info *send_info)
@@ -510,9 +602,10 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
 
 	send_ring->pending_wqe++;
 	send_info->read.length = send_info->write.length;
-	/* Read into the same write area */
-	send_info->read.addr = (uintptr_t)send_info->write.addr;
-	send_info->read.lkey = send_ring->mr->mkey;
+
+	/* Read into dedicated sync buffer */
+	send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
+	send_info->read.lkey = send_ring->sync_mr->mkey;
 
 	if (send_ring->pending_wqe % send_ring->signal_th == 0)
 		send_info->read.send_flags = IB_SEND_SIGNALED;
@@ -526,6 +619,8 @@ static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
 {
 	if (send_info->type == WRITE_ICM)
 		dr_fill_write_icm_segs(dmn, send_ring, send_info);
+	else /* args */
+		dr_fill_write_args_segs(send_ring, send_info);
 }
 
 static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
@@ -754,6 +849,59 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
 	return dr_postsend_icm_data(dmn, &send_info);
 }
 
+int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
+				 struct mlx5dr_icm_chunk *chunk,
+				 u16 num_of_actions,
+				 u8 *data)
+{
+	struct postsend_info send_info = {};
+	int ret;
+
+	send_info.write.addr = (uintptr_t)data;
+	send_info.write.length = num_of_actions * DR_MODIFY_ACTION_SIZE;
+	send_info.remote_addr = mlx5dr_icm_pool_get_chunk_mr_addr(chunk);
+	send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk);
+
+	ret = dr_postsend_icm_data(dmn, &send_info);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
+			      u16 num_of_actions, u8 *actions_data)
+{
+	int data_len, iter = 0, cur_sent;
+	u64 addr;
+	int ret;
+
+	addr = (uintptr_t)actions_data;
+	data_len = num_of_actions * DR_MODIFY_ACTION_SIZE;
+
+	do {
+		struct postsend_info send_info = {};
+
+		send_info.type = GTA_ARG;
+		send_info.write.addr = addr;
+		cur_sent = min_t(u32, data_len, DR_ACTION_CACHE_LINE_SIZE);
+		send_info.write.length = cur_sent;
+		send_info.write.lkey = 0;
+		send_info.remote_addr = arg_id + iter;
+
+		ret = dr_postsend_icm_data(dmn, &send_info);
+		if (ret)
+			goto out;
+
+		iter++;
+		addr += cur_sent;
+		data_len -= cur_sent;
+	} while (data_len > 0);
+
+out:
+	return ret;
+}
+
 static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
 				 struct mlx5dr_qp *dr_qp,
 				 int port)
@@ -1141,16 +1289,25 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
 		goto free_mem;
 	}
 
-	dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
-					    dmn->pdn, dmn->send_ring->sync_buff,
-					    MIN_READ_SYNC);
-	if (!dmn->send_ring->sync_mr) {
+	dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size,
+					    GFP_KERNEL);
+	if (!dmn->send_ring->sync_buff) {
 		ret = -ENOMEM;
 		goto clean_mr;
 	}
 
+	dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
+					    dmn->pdn, dmn->send_ring->sync_buff,
+					    dmn->send_ring->max_post_send_size);
+	if (!dmn->send_ring->sync_mr) {
+		ret = -ENOMEM;
+		goto free_sync_mem;
+	}
+
 	return 0;
 
+free_sync_mem:
+	kfree(dmn->send_ring->sync_buff);
 clean_mr:
 	dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
 free_mem:
@@ -1173,6 +1330,7 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
 	dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
 	dr_dereg_mr(dmn->mdev, send_ring->mr);
 	kfree(send_ring->buf);
+	kfree(send_ring->sync_buff);
 	kfree(send_ring);
 }
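A runnable model of the mlx5dr_send_postsend_args() chunking above: the argument data is pushed in DR_ACTION_CACHE_LINE_SIZE (64B) pieces, and the remote address advances by one argument slot per WQE (arg_id + iter). The arg_id value below is a made-up example.

#include <stdio.h>

int main(void)
{
	unsigned int action_size = 8;		/* DR_MODIFY_ACTION_SIZE */
	unsigned int cache_line = 64;		/* DR_ACTION_CACHE_LINE_SIZE */
	unsigned int num_of_actions = 19;	/* arbitrary example */
	unsigned long long arg_id = 0x2000;	/* hypothetical argument id */
	int data_len = num_of_actions * action_size;
	int iter = 0, cur_sent;

	do {
		cur_sent = data_len < (int)cache_line ? data_len : (int)cache_line;
		printf("WQE %d: write %d bytes to arg 0x%llx\n",
		       iter, cur_sent, arg_id + iter);
		iter++;
		data_len -= cur_sent;
	} while (data_len > 0);
	return 0;
}

For 19 actions (152 bytes) this emits three WQEs of 64, 64 and 24 bytes, which is why the arg pool rounds allocations up to whole 64B slots.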
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 1e15f605df6e..9413aaf51251 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -633,6 +633,63 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
 				       used_hw_action_num);
 }
 
+static int
+dr_ste_alloc_modify_hdr_chunk(struct mlx5dr_action *action)
+{
+	struct mlx5dr_domain *dmn = action->rewrite->dmn;
+	u32 chunk_size;
+	int ret;
+
+	chunk_size = ilog2(roundup_pow_of_two(action->rewrite->num_of_actions));
+
+	/* HW modify action index granularity is at least 64B */
+	chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
+
+	action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+							chunk_size);
+	if (!action->rewrite->chunk)
+		return -ENOMEM;
+
+	action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(action->rewrite->chunk) -
+				  dmn->info.caps.hdr_modify_icm_addr) /
+				 DR_ACTION_CACHE_LINE_SIZE;
+
+	ret = mlx5dr_send_postsend_action(action->rewrite->dmn, action);
+	if (ret)
+		goto free_chunk;
+
+	return 0;
+
+free_chunk:
+	mlx5dr_icm_free_chunk(action->rewrite->chunk);
+	return -ENOMEM;
+}
+
+static void dr_ste_free_modify_hdr_chunk(struct mlx5dr_action *action)
+{
+	mlx5dr_icm_free_chunk(action->rewrite->chunk);
+}
+
+int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action)
+{
+	struct mlx5dr_domain *dmn = action->rewrite->dmn;
+
+	if (mlx5dr_domain_is_support_ptrn_arg(dmn))
+		return dmn->ste_ctx->alloc_modify_hdr_chunk(action);
+
+	return dr_ste_alloc_modify_hdr_chunk(action);
+}
+
+void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action)
+{
+	struct mlx5dr_domain *dmn = action->rewrite->dmn;
+
+	if (mlx5dr_domain_is_support_ptrn_arg(dmn))
+		return dmn->ste_ctx->dealloc_modify_hdr_chunk(action);
+
+	return dr_ste_free_modify_hdr_chunk(action);
+}
+
 static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
 				       struct mlx5dr_match_spec *spec)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 7075142bcfb6..54a6619c3ecb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -195,6 +195,8 @@ struct mlx5dr_ste_ctx {
 					  u8 *hw_action,
 					  u32 hw_action_sz,
 					  u16 *used_hw_action_num);
+	int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
+	void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
 
 	/* Send */
 	void (*prepare_for_postsend)(u8 *hw_ste_p,
 				     u32 ste_size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 27cc6931bbde..4c0704ad166b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -495,21 +495,66 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
 	dr_ste_v1_set_reparse(hw_ste_p);
 }
 
-static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
-					  u8 *s_action,
-					  u16 num_of_actions,
-					  u32 re_write_index)
+static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p,
+						      u8 *d_action,
+						      u16 num_of_actions,
+						      u32 rewrite_pattern,
+						      u32 rewrite_args,
+						      u8 *action_data)
+{
+	if (action_data) {
+		memcpy(d_action, action_data, DR_MODIFY_ACTION_SIZE);
+	} else {
+		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+			 action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST);
+		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+			 modify_actions_pattern_pointer, rewrite_pattern);
+		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+			 number_of_modify_actions, num_of_actions);
+		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+			 modify_actions_argument_pointer, rewrite_args);
+	}
+
+	dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_basic_rewrite_actions(u8 *hw_ste_p,
+						u8 *s_action,
+						u16 num_of_actions,
+						u32 rewrite_index)
 {
 	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
 		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
 	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
 		 num_of_actions);
 	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
-		 re_write_index);
+		 rewrite_index);
 
 	dr_ste_v1_set_reparse(hw_ste_p);
 }
 
+static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
+					  u8 *action,
+					  u16 num_of_actions,
+					  u32 rewrite_pattern,
+					  u32 rewrite_args,
+					  u8 *action_data)
+{
+	if (rewrite_pattern != MLX5DR_INVALID_PATTERN_INDEX)
+		return dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p,
+								 action,
+								 num_of_actions,
+								 rewrite_pattern,
+								 rewrite_args,
+								 action_data);
+
+	/* fall back to the code that doesn't support accelerated modify header */
+	return dr_ste_v1_set_basic_rewrite_actions(hw_ste_p,
+						   action,
+						   num_of_actions,
+						   rewrite_args);
+}
+
 static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
 					 u32 object_id,
 					 u32 offset,
@@ -614,7 +659,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
 		}
 		dr_ste_v1_set_rewrite_actions(last_ste, action,
 					      attr->modify_actions,
-					      attr->modify_index);
+					      attr->modify_pat_idx,
+					      attr->modify_index,
+					      attr->single_modify_action);
 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
 		action += DR_STE_ACTION_DOUBLE_SZ;
 		allow_encap = false;
@@ -744,7 +791,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
 	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
 		dr_ste_v1_set_rewrite_actions(last_ste, action,
 					      attr->decap_actions,
-					      attr->decap_index);
+					      attr->decap_pat_idx,
+					      attr->decap_index,
+					      NULL);
 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
 		action += DR_STE_ACTION_DOUBLE_SZ;
 		allow_modify_hdr = false;
@@ -799,7 +848,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
 		}
 		dr_ste_v1_set_rewrite_actions(last_ste, action,
 					      attr->modify_actions,
-					      attr->modify_index);
+					      attr->modify_pat_idx,
+					      attr->modify_index,
+					      attr->single_modify_action);
 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
 		action += DR_STE_ACTION_DOUBLE_SZ;
 	}
@@ -2176,6 +2227,49 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
 }
 
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
+{
+	struct mlx5dr_ptrn_mgr *ptrn_mgr;
+	int ret;
+
+	ptrn_mgr = action->rewrite->dmn->ptrn_mgr;
+	if (!ptrn_mgr)
+		return -EOPNOTSUPP;
+
+	action->rewrite->arg = mlx5dr_arg_get_obj(action->rewrite->dmn->arg_mgr,
+						  action->rewrite->num_of_actions,
+						  action->rewrite->data);
+	if (!action->rewrite->arg) {
+		mlx5dr_err(action->rewrite->dmn, "Failed allocating args for modify header\n");
+		return -EAGAIN;
+	}
+
+	action->rewrite->ptrn =
+		mlx5dr_ptrn_cache_get_pattern(ptrn_mgr,
+					      action->rewrite->num_of_actions,
+					      action->rewrite->data);
+	if (!action->rewrite->ptrn) {
+		mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n");
+		ret = -EAGAIN;
+		goto put_arg;
+	}
+
+	return 0;
+
+put_arg:
+	mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
+			   action->rewrite->arg);
+	return ret;
+}
+
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
+{
+	mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr,
+				      action->rewrite->ptrn);
+	mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
+			   action->rewrite->arg);
+}
+
 static struct mlx5dr_ste_ctx ste_ctx_v1 = {
 	/* Builders */
 	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
@@ -2232,6 +2326,9 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
 	.set_action_add			= &dr_ste_v1_set_action_add,
 	.set_action_copy		= &dr_ste_v1_set_action_copy,
 	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
+	.alloc_modify_hdr_chunk		= &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+	.dealloc_modify_hdr_chunk	= &dr_ste_v1_free_modify_hdr_ptrn_arg,
+
 	/* Send */
 	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
index b5c0f0f8392f..e2fc69867088 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
@@ -31,6 +31,8 @@ void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
 			       u8 dst_len, u8 src_hw_field, u8 src_shifter);
 int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
 				       u32 hw_action_sz, u16 *used_hw_action_num);
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
 void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
 					 struct mlx5dr_match_param *mask);
 void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
 					  struct mlx5dr_match_param *mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
index cf1a3c9a1cf4..808b013cf48c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
@@ -221,6 +221,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
 	.set_action_add			= &dr_ste_v1_set_action_add,
 	.set_action_copy		= &dr_ste_v1_set_action_copy,
 	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
+	.alloc_modify_hdr_chunk		= &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+	.dealloc_modify_hdr_chunk	= &dr_ste_v1_free_modify_hdr_ptrn_arg,
 
 	/* Send */
 	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
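The new ste_ctx callbacks let STEv1/v2 plug in the pattern/argument allocator while the common code keeps the chunk-based path as fallback. A minimal sketch of this dispatch, with simplified types (struct action and the helpers here are invented for illustration, not driver code):

#include <stdio.h>
#include <stddef.h>

struct action;

struct ste_ctx {
	int (*alloc_modify_hdr_chunk)(struct action *a);
};

static int legacy_icm_alloc(struct action *a)
{
	printf("legacy: ICM chunk + index\n");
	return 0;
}

static int ptrn_arg_alloc(struct action *a)
{
	printf("accelerated: pattern cache + argument object\n");
	return 0;
}

static const struct ste_ctx ste_v1 = { .alloc_modify_hdr_chunk = ptrn_arg_alloc };

static int alloc_modify_hdr(const struct ste_ctx *ctx, struct action *a,
			    int domain_supports_ptrn_arg)
{
	if (domain_supports_ptrn_arg)
		return ctx->alloc_modify_hdr_chunk(a);
	return legacy_icm_alloc(a);
}

int main(void)
{
	alloc_modify_hdr(&ste_v1, NULL, 1);	/* CX6DX+: accelerated path */
	alloc_modify_hdr(&ste_v1, NULL, 0);	/* otherwise: legacy path */
	return 0;
}

Keeping the decision inside mlx5dr_ste_alloc_modify_hdr()/mlx5dr_ste_free_modify_hdr() means STEv0, which has no such callbacks, keeps working unchanged.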
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 5b9faa714f42..37b7b1a79f93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -21,12 +21,15 @@
 #define DR_NUM_OF_FLEX_PARSERS 8
 #define DR_STE_MAX_FLEX_0_ID 3
 #define DR_STE_MAX_FLEX_1_ID 7
+#define DR_ACTION_CACHE_LINE_SIZE 64
 
 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
 #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
 #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
 
 struct mlx5dr_ptrn_mgr;
+struct mlx5dr_arg_mgr;
+struct mlx5dr_arg_obj;
 
 static inline bool dr_is_flex_parser_0_id(u8 parser_id)
 {
@@ -258,11 +261,15 @@ u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
 
 #define MLX5DR_MAX_VLANS 2
+#define MLX5DR_INVALID_PATTERN_INDEX 0xffffffff
 
 struct mlx5dr_ste_actions_attr {
 	u32	modify_index;
+	u32	modify_pat_idx;
 	u16	modify_actions;
+	u8	*single_modify_action;
 	u32	decap_index;
+	u32	decap_pat_idx;
 	u16	decap_actions;
 	u8	decap_with_vlan:1;
 	u64	final_icm_addr;
@@ -334,6 +341,8 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
 					u8 *hw_action,
 					u32 hw_action_sz,
 					u16 *used_hw_action_num);
+int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action);
+void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action);
 const struct mlx5dr_ste_action_modify_field *
 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
 
@@ -893,6 +902,9 @@ struct mlx5dr_cmd_caps {
 	struct mlx5dr_vports vports;
 	bool prio_tag_required;
 	struct mlx5dr_roce_cap roce_caps;
+	u16 log_header_modify_argument_granularity;
+	u16 log_header_modify_argument_max_alloc;
+	bool support_modify_argument;
 	u8 is_ecpf:1;
 	u8 isolate_vl_tc:1;
 };
@@ -935,6 +947,7 @@ struct mlx5dr_domain {
 	struct kmem_cache *chunks_kmem_cache;
 	struct kmem_cache *htbls_kmem_cache;
 	struct mlx5dr_ptrn_mgr *ptrn_mgr;
+	struct mlx5dr_arg_mgr *arg_mgr;
 	struct mlx5dr_send_ring *send_ring;
 	struct mlx5dr_domain_info info;
 	struct xarray csum_fts_xa;
@@ -1001,15 +1014,34 @@ struct mlx5dr_ste_action_modify_field {
 	u8 l4_type;
 };
 
+struct mlx5dr_ptrn_obj {
+	struct mlx5dr_icm_chunk *chunk;
+	u8 *data;
+	u16 num_of_actions;
+	u32 index;
+	refcount_t refcount;
+	struct list_head list;
+};
+
+struct mlx5dr_arg_obj {
+	u32 obj_id;
+	u32 obj_offset;
+	struct list_head list_node;
+	u32 log_chunk_size;
+};
+
 struct mlx5dr_action_rewrite {
 	struct mlx5dr_domain *dmn;
 	struct mlx5dr_icm_chunk *chunk;
 	u8 *data;
 	u16 num_of_actions;
 	u32 index;
+	u8 single_action_opt:1;
 	u8 allow_rx:1;
 	u8 allow_tx:1;
 	u8 modify_ttl:1;
+	struct mlx5dr_ptrn_obj *ptrn;
+	struct mlx5dr_arg_obj *arg;
 };
 
 struct mlx5dr_action_reformat {
@@ -1341,6 +1373,12 @@ struct mlx5dr_cmd_gid_attr {
 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
 			 u16 index, struct mlx5dr_cmd_gid_attr *attr);
 
+int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
+					u16 log_obj_range, u32 pd,
+					u32 *obj_id);
+void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
+					  u32 obj_id);
+
 struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
 					       enum mlx5dr_icm_type icm_type);
 void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
@@ -1375,6 +1413,7 @@ struct mlx5dr_qp {
 	struct mlx5_wq_ctrl wq_ctrl;
 	u32 qpn;
 	struct {
+		unsigned int head;
 		unsigned int pc;
 		unsigned int cc;
 		unsigned int size;
@@ -1406,9 +1445,6 @@ struct mlx5dr_mr {
 	size_t size;
 };
 
-#define MAX_SEND_CQE 64
-#define MIN_READ_SYNC 64
-
 struct mlx5dr_send_ring {
 	struct mlx5dr_cq *cq;
 	struct mlx5dr_qp *qp;
@@ -1423,7 +1459,7 @@ struct mlx5dr_send_ring {
 	u32 tx_head;
 	void *buf;
 	u32 buf_size;
-	u8 sync_buff[MIN_READ_SYNC];
+	u8 *sync_buff;
 	struct mlx5dr_mr *sync_mr;
 	spinlock_t lock; /* Protect the data path of the send ring */
 	bool err_state; /* send_ring is not usable in err state */
@@ -1447,6 +1483,12 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
 					bool update_hw_ste);
 int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
 				struct mlx5dr_action *action);
+int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
+				 struct mlx5dr_icm_chunk *chunk,
+				 u16 num_of_actions,
+				 u8 *data);
+int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
+			      u16 num_of_actions, u8 *actions_data);
 int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
 void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
 
@@ -1536,5 +1578,17 @@ static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
 
 bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn);
 struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn);
 void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr);
+struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
+						      u16 num_of_actions, u8 *data);
+void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
+				   struct mlx5dr_ptrn_obj *pattern);
+struct mlx5dr_arg_mgr *mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn);
+void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr);
+struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
+					  u16 num_of_actions,
+					  u8 *data);
+void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
+			struct mlx5dr_arg_obj *arg_obj);
+u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj);
 
 #endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
index 790a17d6207f..ca3b0f1453a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
@@ -100,7 +100,7 @@ struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits {
 	u8         pointer[0x20];
 };
 
-struct mlx5_ifc_ste_double_action_modify_action_list_v1_bits {
+struct mlx5_ifc_ste_double_action_accelerated_modify_action_list_v1_bits {
 	u8         action_id[0x8];
 	u8         modify_actions_pattern_pointer[0x18];
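Tying the renamed layout back to dr_ste_v1_set_accelerated_rewrite_actions(): a sketch of how the four fields pack into the 8-byte action, assuming the 0x8/0x18 dword split shown in the struct (the DR_STE_V1_ACTION_ID_ACCELERATED_LIST value used here is a placeholder, and MLX5_SET() handles the real bit placement in the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t action_id = 0x07;	/* assumed accelerated-list action id */
	uint32_t pattern_idx = 0x10;	/* from mlx5dr_ptrn_obj->index */
	uint32_t num_actions = 3;
	uint32_t arg_id = 0x2001;	/* from mlx5dr_arg_get_obj_id() */

	/* dword 0: action_id[0x8] | modify_actions_pattern_pointer[0x18]
	 * dword 1: number_of_modify_actions[0x8] | modify_actions_argument_pointer[0x18] */
	uint32_t dw0 = (action_id << 24) | (pattern_idx & 0xffffff);
	uint32_t dw1 = (num_actions << 24) | (arg_id & 0xffffff);

	printf("d_action = 0x%08x 0x%08x\n", dw0, dw1);
	return 0;
}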