net/mlx5: add translation of connection tracking action

When creating a flow rule that uses an action context for CT, the
action needs to be translated at two levels.

First, the action context is retrieved and converted into an rte_flow
action. Second, that action is translated into the corresponding DR
action, using the traffic direction that was specified when the context
was created or updated via the rte_flow_action_handle* API.
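
As a rough application-side sketch (not part of this patch) of the
sequence above: a CT context is created as an indirect action handle
and then referenced from a flow rule, which is what triggers the
two-level translation in the PMD. The helper name
ct_rule_create_sketch(), the port/group/queue numbers and the trivial
pattern below are illustrative assumptions only.

#include <rte_flow.h>

static struct rte_flow *
ct_rule_create_sketch(uint16_t port_id, struct rte_flow_action_handle **hdl,
		      struct rte_flow_error *err)
{
	/* Assumed setup: an already started port with CT offload enabled;
	 * group 1 and queue 0 are arbitrary choices for the example. */
	const struct rte_flow_indir_action_conf indir_conf = { .ingress = 1 };
	const struct rte_flow_action_conntrack ct_conf = {
		.enable = 1,
		.is_original_dir = 1, /* selects the DR action direction */
	};
	const struct rte_flow_action ct_spec = {
		.type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
		.conf = &ct_conf,
	};
	const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[3] = {
		[0] = { .type = RTE_FLOW_ACTION_TYPE_INDIRECT },
		[1] = { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		[2] = { .type = RTE_FLOW_ACTION_TYPE_END },
	};

	*hdl = rte_flow_action_handle_create(port_id, &indir_conf,
					     &ct_spec, err);
	if (*hdl == NULL)
		return NULL;
	/* The INDIRECT action only carries the handle; the PMD converts it
	 * back to RTE_FLOW_ACTION_TYPE_CONNTRACK during translation. */
	actions[0].conf = *hdl;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}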

Before the DR action can be used in a flow, the CT context must be
available in the hardware. A synchronization is therefore done before
inserting a flow rule with a CT action, to confirm that the CT context
is ready in HW.
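
From the application's perspective, this means a rule referencing a CT
context can be rejected if the context has not been committed to HW in
time (the PMD reports "CT is unavailable." and rte_errno is typically
EBUSY). A hedged retry wrapper, with an arbitrary retry budget, could
look like the sketch below; the attr/pattern/actions arguments are
assumed to be prepared as in the earlier sketch.

#include <errno.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_flow.h>

static struct rte_flow *
ct_flow_create_retry_sketch(uint16_t port_id,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *err)
{
	int retries = 3; /* arbitrary budget for the example */

	while (retries--) {
		struct rte_flow *flow =
			rte_flow_create(port_id, attr, pattern, actions, err);

		if (flow != NULL || rte_errno != EBUSY)
			return flow;
		/* Give the ASO WQE a chance to complete before retrying. */
		rte_delay_us_sleep(10);
	}
	return NULL;
}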

In order to release the DR actions and allow the CT context to be
reused, the reference count is also handled when a flow rule is
destroyed.
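
Again as an application-side illustration only (hypothetical helper,
reusing the objects created by ct_rule_create_sketch() above): rules
referencing the CT context are destroyed first, which drops the
PMD-internal reference count, and only then is the indirect handle
itself released so the context can be recycled.

static int
ct_rule_destroy_sketch(uint16_t port_id, struct rte_flow *flow,
		       struct rte_flow_action_handle *handle,
		       struct rte_flow_error *err)
{
	/* Destroying the rule drops the per-rule reference taken on the
	 * CT context during translation (see flow_dv_destroy() below). */
	int ret = rte_flow_destroy(port_id, flow, err);

	if (ret)
		return ret;
	/* With no rule left referencing it, the handle can be released
	 * and the CT context reused. */
	return rte_flow_action_handle_destroy(port_id, handle, err);
}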

The CT index is recorded in the rte_flow structure by reusing the ASO
age index (in a union) to save memory, since only one ASO action is
currently supported in a single flow rule. The indirect action type is
saved as well. When a flow rule is destroyed, if the type is CT and the
index is valid (non-zero), the CT release path is taken; otherwise the
handling falls back to releasing the ASO age, if any.

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

drivers/net/mlx5/mlx5.h

@@ -1715,5 +1715,7 @@ int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,
int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_ct_action *ct,
struct rte_flow_action_conntrack *profile);
int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_ct_action *ct);
#endif /* RTE_PMD_MLX5_H_ */

drivers/net/mlx5/mlx5_flow.c

@@ -3558,6 +3558,15 @@ flow_action_handles_translate(struct rte_eth_dev *dev,
break;
}
/* Fall-through */
case MLX5_INDIRECT_ACTION_TYPE_CT:
if (priv->sh->ct_aso_en) {
translated[handle->index].type =
RTE_FLOW_ACTION_TYPE_CONNTRACK;
translated[handle->index].conf =
(void *)(uintptr_t)idx;
break;
}
/* Fall-through */
default:
mlx5_free(translated);
return rte_flow_error_set

drivers/net/mlx5/mlx5_flow.h

@@ -230,6 +230,7 @@ enum mlx5_feature_name {
#define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
#define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
#define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
#define MLX5_FLOW_ACTION_CT (1ull << 41)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
@@ -970,11 +971,15 @@ struct rte_flow {
uint32_t drv_type:2; /**< Driver type. */
uint32_t tunnel:1;
uint32_t meter:24; /**< Holds flow meter id. */
uint32_t indirect_type:2; /**< Indirect action type. */
uint32_t rix_mreg_copy;
/**< Index to metadata register copy table resource. */
uint32_t counter; /**< Holds flow counter. */
uint32_t tunnel_id; /**< Tunnel id */
uint32_t age; /**< Holds ASO age bit index. */
union {
uint32_t age; /**< Holds ASO age bit index. */
uint32_t ct; /**< Holds ASO CT index. */
};
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;

drivers/net/mlx5/mlx5_flow_aso.c

@@ -1388,3 +1388,44 @@ data_handle:
mlx5_aso_ct_obj_analyze(profile, out_data);
return ret;
}
/*
* Make sure the conntrack context is synchronized with hardware before
* creating a flow rule that uses it.
*
* @param[in] sh
* Pointer to shared device context.
* @param[in] ct
* Pointer to connection tracking offload object.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_ct_action *ct)
{
struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
enum mlx5_aso_ct_state state =
__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
if (state == ASO_CONNTRACK_FREE) {
rte_errno = ENXIO;
return -rte_errno;
} else if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY) {
return 0;
}
do {
mlx5_aso_ct_completion_handle(mng);
state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY)
return 0;
/* Waiting for CQE ready, consider should block or sleep. */
rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
} while (--poll_cqe_times);
rte_errno = EBUSY;
return -rte_errno;
}

drivers/net/mlx5/mlx5_flow_dv.c

@@ -11775,6 +11775,7 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"Failed to update CT");
ct->is_original = !!pro->is_original_dir;
return idx;
}
@@ -11933,6 +11934,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
int action_type = actions->type;
const struct rte_flow_action *found_action = NULL;
uint32_t jump_group = 0;
uint32_t ct_idx;
struct mlx5_aso_ct_action *ct;
if (!mlx5_flow_os_action_supported(action_type))
return rte_flow_error_set(error, ENOTSUP,
@@ -12386,6 +12389,26 @@ flow_dv_translate(struct rte_eth_dev *dev,
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
ct_idx = (uint32_t)(uintptr_t)action->conf;
ct = flow_aso_ct_get_by_idx(dev, ct_idx);
if (mlx5_aso_ct_available(priv->sh, ct))
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"CT is unavailable.");
if (ct->is_original)
dev_flow->dv.actions[actions_n] =
ct->dr_action_orig;
else
dev_flow->dv.actions[actions_n] =
ct->dr_action_rply;
flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
flow->ct = ct_idx;
__atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
@@ -13556,7 +13579,10 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
mlx5_flow_meter_detach(priv, fm);
flow->meter = 0;
}
if (flow->age)
/* Keep the current age handling by default. */
if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
flow_dv_aso_ct_release(dev, flow->ct);
else if (flow->age)
flow_dv_aso_age_release(dev, flow->age);
if (flow->geneve_tlv_option) {
flow_dv_geneve_tlv_option_resource_release(dev);