net/mlx5/hws: add action object

Action objects are used to execute different HW actions on packets.
Each action holds the HW resources and parameters needed to apply
the action in HW when a rule is created.

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
Erez Shitrit authored on 2022-10-20 18:57:46 +03:00, committed by Raslan Darawsheh
parent 405242c52d, commit f8c8a6d844
4 changed files with 3084 additions and 0 deletions

File diff suppressed because it is too large (drivers/net/mlx5/hws/mlx5dr_action.c, 2237 additions).

drivers/net/mlx5/hws/mlx5dr_action.h
@@ -0,0 +1,253 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2022 NVIDIA Corporation & Affiliates
*/
#ifndef MLX5DR_ACTION_H_
#define MLX5DR_ACTION_H_
/* Max number of STEs needed for a rule (including match) */
#define MLX5DR_ACTION_MAX_STE 7
enum mlx5dr_action_stc_idx {
MLX5DR_ACTION_STC_IDX_CTRL = 0,
MLX5DR_ACTION_STC_IDX_HIT = 1,
MLX5DR_ACTION_STC_IDX_DW5 = 2,
MLX5DR_ACTION_STC_IDX_DW6 = 3,
MLX5DR_ACTION_STC_IDX_DW7 = 4,
MLX5DR_ACTION_STC_IDX_MAX = 5,
/* STC Jumbo STE combo: CTR, Hit */
MLX5DR_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
/* STC combo1: CTR, SINGLE, DOUBLE, Hit */
MLX5DR_ACTION_STC_IDX_LAST_COMBO1 = 3,
/* STC combo2: CTR, 3 x SINGLE, Hit */
MLX5DR_ACTION_STC_IDX_LAST_COMBO2 = 4,
};
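/* Dword offsets of the action fields within the STE WQE data segment */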
enum mlx5dr_action_offset {
MLX5DR_ACTION_OFFSET_DW0 = 0,
MLX5DR_ACTION_OFFSET_DW5 = 5,
MLX5DR_ACTION_OFFSET_DW6 = 6,
MLX5DR_ACTION_OFFSET_DW7 = 7,
MLX5DR_ACTION_OFFSET_HIT = 3,
MLX5DR_ACTION_OFFSET_HIT_LSB = 4,
};
enum {
MLX5DR_ACTION_DOUBLE_SIZE = 8,
MLX5DR_ACTION_INLINE_DATA_SIZE = 4,
MLX5DR_ACTION_HDR_LEN_L2_MACS = 12,
MLX5DR_ACTION_HDR_LEN_L2_VLAN = 4,
MLX5DR_ACTION_HDR_LEN_L2_ETHER = 2,
MLX5DR_ACTION_HDR_LEN_L2 = (MLX5DR_ACTION_HDR_LEN_L2_MACS +
MLX5DR_ACTION_HDR_LEN_L2_ETHER),
MLX5DR_ACTION_HDR_LEN_L2_W_VLAN = (MLX5DR_ACTION_HDR_LEN_L2 +
MLX5DR_ACTION_HDR_LEN_L2_VLAN),
MLX5DR_ACTION_REFORMAT_DATA_SIZE = 64,
DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
};
enum mlx5dr_action_setter_flag {
ASF_SINGLE1 = 1 << 0,
ASF_SINGLE2 = 1 << 1,
ASF_SINGLE3 = 1 << 2,
ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
ASF_REPARSE = 1 << 3,
ASF_REMOVE = 1 << 4,
ASF_MODIFY = 1 << 5,
ASF_CTR = 1 << 6,
ASF_HIT = 1 << 7,
};
struct mlx5dr_action_default_stc {
struct mlx5dr_pool_chunk nop_ctr;
struct mlx5dr_pool_chunk nop_dw5;
struct mlx5dr_pool_chunk nop_dw6;
struct mlx5dr_pool_chunk nop_dw7;
struct mlx5dr_pool_chunk default_hit;
uint32_t refcount;
};
struct mlx5dr_action_shared_stc {
struct mlx5dr_pool_chunk remove_header;
rte_atomic32_t refcount;
};
struct mlx5dr_actions_apply_data {
struct mlx5dr_send_engine *queue;
struct mlx5dr_rule_action *rule_action;
uint32_t *wqe_data;
struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
uint32_t jump_to_action_stc;
struct mlx5dr_context_common_res *common_res;
enum mlx5dr_table_type tbl_type;
uint32_t next_direct_idx;
uint8_t require_dep;
};
struct mlx5dr_actions_wqe_setter;
typedef void (*mlx5dr_action_setter_fp)
(struct mlx5dr_actions_apply_data *apply,
struct mlx5dr_actions_wqe_setter *setter);
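/* Describes how one action STE is built: the setter callbacks to invoke
 * and the rule_action indices each of them consumes.
 */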
struct mlx5dr_actions_wqe_setter {
mlx5dr_action_setter_fp set_single;
mlx5dr_action_setter_fp set_double;
mlx5dr_action_setter_fp set_hit;
mlx5dr_action_setter_fp set_ctr;
uint8_t idx_single;
uint8_t idx_double;
uint8_t idx_ctr;
uint8_t idx_hit;
uint8_t flags;
};
struct mlx5dr_action_template {
struct mlx5dr_actions_wqe_setter setters[MLX5DR_ACTION_MAX_STE];
enum mlx5dr_action_type *action_type_arr;
uint8_t num_of_action_stes;
uint8_t num_actions;
uint8_t only_term;
};
struct mlx5dr_action {
uint8_t type;
uint8_t flags;
struct mlx5dr_context *ctx;
union {
struct {
struct mlx5dr_pool_chunk stc[MLX5DR_TABLE_TYPE_MAX];
union {
struct {
struct mlx5dr_devx_obj *pattern_obj;
struct mlx5dr_devx_obj *arg_obj;
__be64 single_action;
uint8_t single_action_type;
uint16_t num_of_actions;
} modify_header;
struct {
struct mlx5dr_devx_obj *arg_obj;
uint32_t header_size;
} reformat;
struct {
struct mlx5dr_devx_obj *devx_obj;
uint8_t return_reg_id;
} aso;
struct {
uint16_t vport_num;
uint16_t esw_owner_vhca_id;
} vport;
};
};
struct ibv_flow_action *flow_action;
struct mlx5dv_devx_obj *devx_obj;
struct ibv_qp *qp;
};
};
int mlx5dr_action_root_build_attr(struct mlx5dr_rule_action rule_actions[],
uint32_t num_actions,
struct mlx5dv_flow_action_attr *attr);
int mlx5dr_action_get_default_stc(struct mlx5dr_context *ctx,
uint8_t tbl_type);
void mlx5dr_action_put_default_stc(struct mlx5dr_context *ctx,
uint8_t tbl_type);
void mlx5dr_action_prepare_decap_l3_data(uint8_t *src, uint8_t *dst,
uint16_t num_of_actions);
int mlx5dr_action_template_process(struct mlx5dr_action_template *at);
bool mlx5dr_action_check_combo(enum mlx5dr_action_type *user_actions,
enum mlx5dr_table_type table_type);
int mlx5dr_action_alloc_single_stc(struct mlx5dr_context *ctx,
struct mlx5dr_cmd_stc_modify_attr *stc_attr,
uint32_t table_type,
struct mlx5dr_pool_chunk *stc);
void mlx5dr_action_free_single_stc(struct mlx5dr_context *ctx,
uint32_t table_type,
struct mlx5dr_pool_chunk *stc);
static inline void
mlx5dr_action_setter_default_single(struct mlx5dr_actions_apply_data *apply,
__rte_unused struct mlx5dr_actions_wqe_setter *setter)
{
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] =
htobe32(apply->common_res->default_stc->nop_dw5.offset);
}
static inline void
mlx5dr_action_setter_default_double(struct mlx5dr_actions_apply_data *apply,
__rte_unused struct mlx5dr_actions_wqe_setter *setter)
{
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0;
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] =
htobe32(apply->common_res->default_stc->nop_dw6.offset);
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] =
htobe32(apply->common_res->default_stc->nop_dw7.offset);
}
static inline void
mlx5dr_action_setter_default_ctr(struct mlx5dr_actions_apply_data *apply,
__rte_unused struct mlx5dr_actions_wqe_setter *setter)
{
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW0] = 0;
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_CTRL] =
htobe32(apply->common_res->default_stc->nop_ctr.offset);
}
static inline void
mlx5dr_action_apply_setter(struct mlx5dr_actions_apply_data *apply,
struct mlx5dr_actions_wqe_setter *setter,
bool is_jumbo)
{
uint8_t num_of_actions;
/* Set control counter */
if (setter->flags & ASF_CTR)
setter->set_ctr(apply, setter);
else
mlx5dr_action_setter_default_ctr(apply, setter);
/* Set single and double on match */
if (!is_jumbo) {
if (setter->flags & ASF_SINGLE1)
setter->set_single(apply, setter);
else
mlx5dr_action_setter_default_single(apply, setter);
if (setter->flags & ASF_DOUBLE)
setter->set_double(apply, setter);
else
mlx5dr_action_setter_default_double(apply, setter);
num_of_actions = setter->flags & ASF_DOUBLE ?
MLX5DR_ACTION_STC_IDX_LAST_COMBO1 :
MLX5DR_ACTION_STC_IDX_LAST_COMBO2;
} else {
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0;
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] = 0;
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = 0;
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;
num_of_actions = MLX5DR_ACTION_STC_IDX_LAST_JUMBO_STE;
}
/* Set next/final hit action */
setter->set_hit(apply, setter);
/* Set number of actions */
apply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_CTRL] |=
htobe32(num_of_actions << 29);
}
#endif /* MLX5DR_ACTION_H_ */
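
For reference, a minimal standalone sketch (not part of the patch) of the packing done at the end of mlx5dr_action_apply_setter(): the index of the last used STC slot is placed in bits 31:29 of the control STC word, on top of the STC offset. The htobe32 conversions of the real code are omitted, and the constants are local mirrors of the enum above:

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of MLX5DR_ACTION_STC_IDX_LAST_* for this sketch */
#define LAST_JUMBO_STE 1	/* CTR, Hit */
#define LAST_COMBO1    3	/* CTR, SINGLE, DOUBLE, Hit */
#define LAST_COMBO2    4	/* CTR, 3 x SINGLE, Hit */

/* Pack the last-action index into bits 31:29 of the control STC word */
static uint32_t pack_ctrl_stc(uint32_t ctrl_stc_offset, uint8_t last_idx)
{
	return ctrl_stc_offset | ((uint32_t)last_idx << 29);
}

int main(void)
{
	printf("combo1: 0x%08x\n", pack_ctrl_stc(0x15, LAST_COMBO1));    /* 0x60000015 */
	printf("jumbo:  0x%08x\n", pack_ctrl_stc(0x15, LAST_JUMBO_STE)); /* 0x20000015 */
	return 0;
}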

drivers/net/mlx5/hws/mlx5dr_pat_arg.c
@@ -0,0 +1,511 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2022 NVIDIA Corporation & Affiliates
*/
#include "mlx5dr_internal.h"
enum mlx5dr_arg_chunk_size
mlx5dr_arg_data_size_to_arg_log_size(uint16_t data_size)
{
/* Return the roundup of log2(data_size) */
if (data_size <= MLX5DR_ARG_DATA_SIZE)
return MLX5DR_ARG_CHUNK_SIZE_1;
if (data_size <= MLX5DR_ARG_DATA_SIZE * 2)
return MLX5DR_ARG_CHUNK_SIZE_2;
if (data_size <= MLX5DR_ARG_DATA_SIZE * 4)
return MLX5DR_ARG_CHUNK_SIZE_3;
if (data_size <= MLX5DR_ARG_DATA_SIZE * 8)
return MLX5DR_ARG_CHUNK_SIZE_4;
return MLX5DR_ARG_CHUNK_SIZE_MAX;
}
uint32_t mlx5dr_arg_data_size_to_arg_size(uint16_t data_size)
{
return BIT(mlx5dr_arg_data_size_to_arg_log_size(data_size));
}
enum mlx5dr_arg_chunk_size
mlx5dr_arg_get_arg_log_size(uint16_t num_of_actions)
{
return mlx5dr_arg_data_size_to_arg_log_size(num_of_actions *
MLX5DR_MODIFY_ACTION_SIZE);
}
uint32_t mlx5dr_arg_get_arg_size(uint16_t num_of_actions)
{
return BIT(mlx5dr_arg_get_arg_log_size(num_of_actions));
}
/* Cache and cache element handling */
int mlx5dr_pat_init_pattern_cache(struct mlx5dr_pattern_cache **cache)
{
struct mlx5dr_pattern_cache *new_cache;
new_cache = simple_calloc(1, sizeof(*new_cache));
if (!new_cache) {
rte_errno = ENOMEM;
return rte_errno;
}
LIST_INIT(&new_cache->head);
pthread_spin_init(&new_cache->lock, PTHREAD_PROCESS_PRIVATE);
*cache = new_cache;
return 0;
}
void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache)
{
simple_free(cache);
}
static bool mlx5dr_pat_compare_pattern(enum mlx5dr_action_type cur_type,
int cur_num_of_actions,
__be64 cur_actions[],
enum mlx5dr_action_type type,
int num_of_actions,
__be64 actions[])
{
int i;
if (cur_num_of_actions != num_of_actions || cur_type != type)
return false;
/* All decap-l3 patterns look the same, only the number of actions differs */
if (type == MLX5DR_ACTION_TYP_TNL_L3_TO_L2)
return true;
for (i = 0; i < num_of_actions; i++) {
u8 action_id =
MLX5_GET(set_action_in, &actions[i], action_type);
if (action_id == MLX5_MODIFICATION_TYPE_COPY) {
if (actions[i] != cur_actions[i])
return false;
} else {
/* Compare just the control, not the values */
if ((__be32)actions[i] !=
(__be32)cur_actions[i])
return false;
}
}
return true;
}
static struct mlx5dr_pat_cached_pattern *
mlx5dr_pat_find_cached_pattern(struct mlx5dr_pattern_cache *cache,
struct mlx5dr_action *action,
uint16_t num_of_actions,
__be64 *actions)
{
struct mlx5dr_pat_cached_pattern *cached_pat;
LIST_FOREACH(cached_pat, &cache->head, next) {
if (mlx5dr_pat_compare_pattern(cached_pat->type,
cached_pat->mh_data.num_of_actions,
(__be64 *)cached_pat->mh_data.data,
action->type,
num_of_actions,
actions))
return cached_pat;
}
return NULL;
}
static struct mlx5dr_pat_cached_pattern *
mlx5dr_pat_get_existing_cached_pattern(struct mlx5dr_pattern_cache *cache,
struct mlx5dr_action *action,
uint16_t num_of_actions,
__be64 *actions)
{
struct mlx5dr_pat_cached_pattern *cached_pattern;
cached_pattern = mlx5dr_pat_find_cached_pattern(cache, action, num_of_actions, actions);
if (cached_pattern) {
/* LRU: move it to be first in the list */
LIST_REMOVE(cached_pattern, next);
LIST_INSERT_HEAD(&cache->head, cached_pattern, next);
rte_atomic32_add(&cached_pattern->refcount, 1);
}
return cached_pattern;
}
static struct mlx5dr_pat_cached_pattern *
mlx5dr_pat_get_cached_pattern_by_action(struct mlx5dr_pattern_cache *cache,
struct mlx5dr_action *action)
{
struct mlx5dr_pat_cached_pattern *cached_pattern;
LIST_FOREACH(cached_pattern, &cache->head, next) {
if (cached_pattern->mh_data.pattern_obj->id == action->modify_header.pattern_obj->id)
return cached_pattern;
}
return NULL;
}
static struct mlx5dr_pat_cached_pattern *
mlx5dr_pat_add_pattern_to_cache(struct mlx5dr_pattern_cache *cache,
struct mlx5dr_devx_obj *pattern_obj,
enum mlx5dr_action_type type,
uint16_t num_of_actions,
__be64 *actions)
{
struct mlx5dr_pat_cached_pattern *cached_pattern;
cached_pattern = simple_calloc(1, sizeof(*cached_pattern));
if (!cached_pattern) {
DR_LOG(ERR, "Failed to allocate cached_pattern");
rte_errno = ENOMEM;
return NULL;
}
cached_pattern->type = type;
cached_pattern->mh_data.num_of_actions = num_of_actions;
cached_pattern->mh_data.pattern_obj = pattern_obj;
cached_pattern->mh_data.data =
simple_malloc(num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);
if (!cached_pattern->mh_data.data) {
DR_LOG(ERR, "Failed to allocate mh_data.data");
rte_errno = ENOMEM;
goto free_cached_obj;
}
memcpy(cached_pattern->mh_data.data, actions,
num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);
LIST_INSERT_HEAD(&cache->head, cached_pattern, next);
rte_atomic32_init(&cached_pattern->refcount);
rte_atomic32_set(&cached_pattern->refcount, 1);
return cached_pattern;
free_cached_obj:
simple_free(cached_pattern);
return NULL;
}
static void
mlx5dr_pat_remove_pattern(struct mlx5dr_pat_cached_pattern *cached_pattern)
{
LIST_REMOVE(cached_pattern, next);
simple_free(cached_pattern->mh_data.data);
simple_free(cached_pattern);
}
static void
mlx5dr_pat_put_pattern(struct mlx5dr_pattern_cache *cache,
struct mlx5dr_action *action)
{
struct mlx5dr_pat_cached_pattern *cached_pattern;
pthread_spin_lock(&cache->lock);
cached_pattern = mlx5dr_pat_get_cached_pattern_by_action(cache, action);
if (!cached_pattern) {
DR_LOG(ERR, "Failed to find pattern according to action with pt");
assert(false);
goto out;
}
if (!rte_atomic32_dec_and_test(&cached_pattern->refcount))
goto out;
mlx5dr_pat_remove_pattern(cached_pattern);
out:
pthread_spin_unlock(&cache->lock);
}
static int mlx5dr_pat_get_pattern(struct mlx5dr_context *ctx,
struct mlx5dr_action *action,
uint16_t num_of_actions,
size_t pattern_sz,
__be64 *pattern)
{
struct mlx5dr_pat_cached_pattern *cached_pattern;
int ret = 0;
pthread_spin_lock(&ctx->pattern_cache->lock);
cached_pattern = mlx5dr_pat_get_existing_cached_pattern(ctx->pattern_cache,
action,
num_of_actions,
pattern);
if (cached_pattern) {
action->modify_header.pattern_obj = cached_pattern->mh_data.pattern_obj;
goto out_unlock;
}
action->modify_header.pattern_obj =
mlx5dr_cmd_header_modify_pattern_create(ctx->ibv_ctx,
pattern_sz,
(uint8_t *)pattern);
if (!action->modify_header.pattern_obj) {
DR_LOG(ERR, "Failed to create pattern FW object");
ret = rte_errno;
goto out_unlock;
}
cached_pattern =
mlx5dr_pat_add_pattern_to_cache(ctx->pattern_cache,
action->modify_header.pattern_obj,
action->type,
num_of_actions,
pattern);
if (!cached_pattern) {
DR_LOG(ERR, "Failed to add pattern to cache");
ret = rte_errno;
goto clean_pattern;
}
out_unlock:
pthread_spin_unlock(&ctx->pattern_cache->lock);
return ret;
clean_pattern:
mlx5dr_cmd_destroy_obj(action->modify_header.pattern_obj);
pthread_spin_unlock(&ctx->pattern_cache->lock);
return ret;
}
static void
mlx5d_arg_init_send_attr(struct mlx5dr_send_engine_post_attr *send_attr,
void *comp_data,
uint32_t arg_idx)
{
send_attr->opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
send_attr->opmod = MLX5DR_WQE_GTA_OPMOD_MOD_ARG;
send_attr->len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
send_attr->id = arg_idx;
send_attr->user_data = comp_data;
}
void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,
uint32_t arg_idx,
uint8_t *arg_data,
uint16_t num_of_actions)
{
struct mlx5dr_send_engine_post_attr send_attr = {0};
struct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;
struct mlx5dr_send_engine_post_ctrl ctrl;
struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
size_t wqe_len;
mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
ctrl = mlx5dr_send_engine_post_start(queue);
mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
mlx5dr_action_prepare_decap_l3_data(arg_data, (uint8_t *)wqe_arg,
num_of_actions);
mlx5dr_send_engine_post_end(&ctrl, &send_attr);
}
static int
mlx5dr_arg_poll_for_comp(struct mlx5dr_context *ctx, uint16_t queue_id)
{
struct rte_flow_op_result comp[1];
int ret;
while (true) {
ret = mlx5dr_send_queue_poll(ctx, queue_id, comp, 1);
if (ret) {
if (ret < 0) {
DR_LOG(ERR, "Failed mlx5dr_send_queue_poll");
} else if (comp[0].status == RTE_FLOW_OP_ERROR) {
DR_LOG(ERR, "Got comp with error");
rte_errno = ENOENT;
}
break;
}
}
return (ret == 1 ? 0 : ret);
}
void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,
void *comp_data,
uint32_t arg_idx,
uint8_t *arg_data,
size_t data_size)
{
struct mlx5dr_send_engine_post_attr send_attr = {0};
struct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;
struct mlx5dr_send_engine_post_ctrl ctrl;
struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
int i, full_iter, leftover;
size_t wqe_len;
mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
/* Each WQE can hold 64B of data, so multiple iterations may be required */
full_iter = data_size / MLX5DR_ARG_DATA_SIZE;
leftover = data_size & (MLX5DR_ARG_DATA_SIZE - 1);
for (i = 0; i < full_iter; i++) {
ctrl = mlx5dr_send_engine_post_start(queue);
mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
memcpy(wqe_arg, arg_data, wqe_len);
send_attr.id = arg_idx++;
mlx5dr_send_engine_post_end(&ctrl, &send_attr);
/* Move to next argument data */
arg_data += MLX5DR_ARG_DATA_SIZE;
}
if (leftover) {
ctrl = mlx5dr_send_engine_post_start(queue);
mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
memcpy(wqe_arg, arg_data, leftover);
send_attr.id = arg_idx;
mlx5dr_send_engine_post_end(&ctrl, &send_attr);
}
}
int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,
uint32_t arg_idx,
uint8_t *arg_data,
size_t data_size)
{
struct mlx5dr_send_engine *queue;
int ret;
pthread_spin_lock(&ctx->ctrl_lock);
/* Get the control queue */
queue = &ctx->send_queue[ctx->queues - 1];
mlx5dr_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
mlx5dr_send_engine_flush_queue(queue);
/* Poll for completion */
ret = mlx5dr_arg_poll_for_comp(ctx, ctx->queues - 1);
if (ret)
DR_LOG(ERR, "Failed to get completions for shared action");
pthread_spin_unlock(&ctx->ctrl_lock);
return ret;
}
bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx,
uint32_t arg_size)
{
if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
return false;
}
return true;
}
static int
mlx5dr_arg_create_modify_header_arg(struct mlx5dr_context *ctx,
struct mlx5dr_action *action,
uint16_t num_of_actions,
__be64 *pattern,
uint32_t bulk_size)
{
uint32_t flags = action->flags;
uint16_t args_log_size;
int ret = 0;
/* Alloc bulk of args */
args_log_size = mlx5dr_arg_get_arg_log_size(num_of_actions);
if (args_log_size >= MLX5DR_ARG_CHUNK_SIZE_MAX) {
DR_LOG(ERR, "Exceed number of allowed actions %u",
num_of_actions);
rte_errno = EINVAL;
return rte_errno;
}
if (!mlx5dr_arg_is_valid_arg_request_size(ctx, args_log_size + bulk_size)) {
DR_LOG(ERR, "Arg size %d does not fit FW capability",
args_log_size + bulk_size);
rte_errno = EINVAL;
return rte_errno;
}
action->modify_header.arg_obj =
mlx5dr_cmd_arg_create(ctx->ibv_ctx, args_log_size + bulk_size,
ctx->pd_num);
if (!action->modify_header.arg_obj) {
DR_LOG(ERR, "Failed allocating arg in order: %d",
args_log_size + bulk_size);
return rte_errno;
}
/* When the action is shared (inline), write the arg data now */
if (flags & MLX5DR_ACTION_FLAG_SHARED)
ret = mlx5dr_arg_write_inline_arg_data(ctx,
action->modify_header.arg_obj->id,
(uint8_t *)pattern,
num_of_actions *
MLX5DR_MODIFY_ACTION_SIZE);
if (ret) {
DR_LOG(ERR, "Failed writing INLINE arg in order: %d",
args_log_size + bulk_size);
mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);
return rte_errno;
}
return 0;
}
int mlx5dr_pat_arg_create_modify_header(struct mlx5dr_context *ctx,
struct mlx5dr_action *action,
size_t pattern_sz,
__be64 pattern[],
uint32_t bulk_size)
{
uint16_t num_of_actions;
int ret;
num_of_actions = pattern_sz / MLX5DR_MODIFY_ACTION_SIZE;
if (num_of_actions == 0) {
DR_LOG(ERR, "Invalid number of actions %u\n", num_of_actions);
rte_errno = EINVAL;
return rte_errno;
}
action->modify_header.num_of_actions = num_of_actions;
ret = mlx5dr_arg_create_modify_header_arg(ctx, action,
num_of_actions,
pattern,
bulk_size);
if (ret) {
DR_LOG(ERR, "Failed to allocate arg");
return ret;
}
ret = mlx5dr_pat_get_pattern(ctx, action, num_of_actions, pattern_sz,
pattern);
if (ret) {
DR_LOG(ERR, "Failed to allocate pattern");
goto free_arg;
}
return 0;
free_arg:
mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);
return rte_errno;
}
void mlx5dr_pat_arg_destroy_modify_header(struct mlx5dr_context *ctx,
struct mlx5dr_action *action)
{
mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);
mlx5dr_pat_put_pattern(ctx->pattern_cache, action);
}
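
The roundup in mlx5dr_arg_data_size_to_arg_log_size() above maps a data size to the log2 of the number of 64-byte argument chunks. A standalone sketch of the same mapping, assuming no upper cap (the real function returns MLX5DR_ARG_CHUNK_SIZE_MAX beyond 8 chunks):

#include <stdint.h>
#include <stdio.h>

#define ARG_DATA_SIZE 64	/* bytes carried per argument chunk */

/* log2 of the number of 64B chunks needed for data_size, rounded up */
static uint32_t arg_log_size(uint16_t data_size)
{
	uint32_t log_sz = 0;

	while (((uint32_t)ARG_DATA_SIZE << log_sz) < data_size)
		log_sz++;
	return log_sz;
}

int main(void)
{
	const uint16_t sizes[] = {8, 64, 100, 256, 300};
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("data %3u -> log size %u (%u chunk(s))\n",
		       sizes[i], arg_log_size(sizes[i]),
		       1u << arg_log_size(sizes[i]));
	return 0;
}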

drivers/net/mlx5/hws/mlx5dr_pat_arg.h
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2022 NVIDIA Corporation & Affiliates
*/
#ifndef MLX5DR_PAT_ARG_H_
#define MLX5DR_PAT_ARG_H_
/* Modify-header arg pool */
enum mlx5dr_arg_chunk_size {
MLX5DR_ARG_CHUNK_SIZE_1,
/* Keep MIN updated when changing */
MLX5DR_ARG_CHUNK_SIZE_MIN = MLX5DR_ARG_CHUNK_SIZE_1,
MLX5DR_ARG_CHUNK_SIZE_2,
MLX5DR_ARG_CHUNK_SIZE_3,
MLX5DR_ARG_CHUNK_SIZE_4,
MLX5DR_ARG_CHUNK_SIZE_MAX,
};
enum {
MLX5DR_MODIFY_ACTION_SIZE = 8,
MLX5DR_ARG_DATA_SIZE = 64,
};
struct mlx5dr_pattern_cache {
/* Protect pattern list */
pthread_spinlock_t lock;
LIST_HEAD(pattern_head, mlx5dr_pat_cached_pattern) head;
};
struct mlx5dr_pat_cached_pattern {
enum mlx5dr_action_type type;
struct {
struct mlx5dr_devx_obj *pattern_obj;
struct dr_icm_chunk *chunk;
uint8_t *data;
uint16_t num_of_actions;
} mh_data;
rte_atomic32_t refcount;
LIST_ENTRY(mlx5dr_pat_cached_pattern) next;
};
enum mlx5dr_arg_chunk_size
mlx5dr_arg_get_arg_log_size(uint16_t num_of_actions);
uint32_t mlx5dr_arg_get_arg_size(uint16_t num_of_actions);
enum mlx5dr_arg_chunk_size
mlx5dr_arg_data_size_to_arg_log_size(uint16_t data_size);
uint32_t mlx5dr_arg_data_size_to_arg_size(uint16_t data_size);
int mlx5dr_pat_init_pattern_cache(struct mlx5dr_pattern_cache **cache);
void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache);
int mlx5dr_pat_arg_create_modify_header(struct mlx5dr_context *ctx,
struct mlx5dr_action *action,
size_t pattern_sz,
__be64 pattern[],
uint32_t bulk_size);
void mlx5dr_pat_arg_destroy_modify_header(struct mlx5dr_context *ctx,
struct mlx5dr_action *action);
bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx,
uint32_t arg_size);
void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,
void *comp_data,
uint32_t arg_idx,
uint8_t *arg_data,
size_t data_size);
void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,
uint32_t arg_idx,
uint8_t *arg_data,
uint16_t num_of_actions);
int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,
uint32_t arg_idx,
uint8_t *arg_data,
size_t data_size);
#endif /* MLX5DR_PAT_ARG_H_ */
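
A hypothetical call flow for the pattern/argument pair exposed by this header (sketch only: obtaining the mlx5dr_context and the modify-header pattern is outside this patch, and passing 0 as the log bulk size is an assumption for a single, shared argument):

#include "mlx5dr_internal.h"

static int example_modify_header_setup(struct mlx5dr_context *ctx,
					struct mlx5dr_action *action,
					__be64 pattern[], size_t pattern_sz)
{
	int ret;

	/* Allocate the ARG object and find or create the shared PATTERN object */
	ret = mlx5dr_pat_arg_create_modify_header(ctx, action, pattern_sz,
						  pattern, 0 /* log bulk size, assumed */);
	if (ret)
		return ret;

	/* ... the action can now be attached to rules ... */

	/* Release the ARG object and drop the pattern cache reference */
	mlx5dr_pat_arg_destroy_modify_header(ctx, action);
	return 0;
}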