net/mlx5: support flow modify field with HWS

This patch introduces support for the modify_field rte_flow action in
HWS mode, including:
	- Ingress and egress domains,
	- SET and ADD operations,
	- usage of arbitrary bit offsets and widths for packet and metadata
	  fields.

This is implemented in two phases:
1. On flow table creation, the hardware commands are generated based
   on the rte_flow action templates and stored alongside the action
   template.

2. On flow rule creation/enqueue, the hardware commands are updated
   with the values provided by the user. Any masks over immediate
   values provided in action templates are applied to these values
   before the rules are enqueued for creation (see the sketch below).
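
For illustration, a minimal sketch of an actions template using
modify_field under the async template API; port_id, at_attr, and
error are assumed to be set up by the caller, and the all-zero value
mask marks the immediate source value as per-rule rather than shared:

	/* Set IPv4 TTL from an immediate value provided per flow rule. */
	static const struct rte_flow_action_modify_field set_ttl_conf = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = RTE_FLOW_FIELD_IPV4_TTL,
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
			/* .value stays zeroed, filled in at rule enqueue. */
		},
		.width = 8,
	};
	/* Operation, destination and width must be fully masked. */
	static const struct rte_flow_action_modify_field set_ttl_mask = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = RTE_FLOW_FIELD_IPV4_TTL,
			.level = UINT32_MAX,
			.offset = UINT32_MAX,
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
			/* All-zero value mask: immediate value is not shared. */
		},
		.width = UINT32_MAX,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &set_ttl_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &set_ttl_mask },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_actions_template *at =
		rte_flow_actions_template_create(port_id, &at_attr,
						 actions, masks, &error);

A fully specified value mask would instead make the immediate value
shared, letting the PMD precompile the whole modify header action at
template translation time.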

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Authored by Suanming Mou on 2022-10-20 18:41:38 +03:00, committed by Raslan Darawsheh
parent 7f6daa490d
commit 0f4aa72b99
7 changed files with 1008 additions and 275 deletions


@@ -178,6 +178,7 @@ New Features
* **Updated NVIDIA mlx5 driver.**
* Added full support for queue-based async HW steering.
- Support of modify fields.
* **Updated NXP dpaa2 driver.**


@@ -751,6 +751,8 @@ enum mlx5_modification_field {
MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
MLX5_MODI_GTP_TEID = 0x6E,
MLX5_MODI_OUT_IP_ECN = 0x73,
MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75, /* 1st DW of the tunnel header (VXLAN VNI in bits 31-8). */
MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76, /* 1st DW of the 1st GTP-U extension header. */
};
/* Total number of metadata reg_c's. */


@@ -1553,6 +1553,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_hrxq_clone_free_cb);
if (!priv->hrxqs)
goto error;
mlx5_set_metadata_mask(eth_dev);
if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
!priv->sh->dv_regc0_mask) {
DRV_LOG(ERR, "metadata mode %u is not supported "
"(no metadata reg_c[0] is available)",
sh->config.dv_xmeta_en);
err = ENOTSUP;
goto error;
}
rte_rwlock_init(&priv->ind_tbls_lock);
if (priv->sh->config.dv_flow_en == 2) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
@@ -1579,15 +1588,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = -err;
goto error;
}
mlx5_set_metadata_mask(eth_dev);
if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
!priv->sh->dv_regc0_mask) {
DRV_LOG(ERR, "metadata mode %u is not supported "
"(no metadata reg_c[0] is available)",
sh->config.dv_xmeta_en);
err = ENOTSUP;
goto error;
}
/* Query availability of metadata reg_c's. */
if (!priv->sh->metadata_regc_check_flag) {
err = mlx5_flow_discover_mreg_c(eth_dev);


@@ -348,6 +348,7 @@ struct mlx5_hw_q_job {
struct rte_flow_hw *flow; /* Flow attached to the job. */
void *user_data; /* Job user data. */
uint8_t *encap_data; /* Encap data. */
struct mlx5_modification_cmd *mhdr_cmd; /* Modify header commands. */
};
/* HW steering job descriptor LIFO pool. */


@@ -1013,6 +1013,51 @@ flow_items_to_tunnel(const struct rte_flow_item items[])
return items[0].spec;
}
/**
* Fetch 1, 2, 3 or 4 byte field from the byte array
* and return as unsigned integer in host-endian format.
*
* @param[in] data
* Pointer to data array.
* @param[in] size
* Size of field to extract.
*
* @return
* Converted field in host-endian format.
*/
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
uint32_t ret;
switch (size) {
case 1:
ret = *data;
break;
case 2:
ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
break;
case 3:
ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
ret = (ret << 8) | *(data + sizeof(uint16_t));
break;
case 4:
ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
break;
default:
MLX5_ASSERT(false);
ret = 0;
break;
}
return ret;
}
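/*
 * For example, flow_dv_fetch_field() over bytes { 0x12, 0x34, 0x56 }
 * with size 3 returns 0x123456 on hosts of either endianness.
 */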
struct field_modify_info {
uint32_t size; /* Size of field in protocol header, in bytes. */
uint32_t offset; /* Offset of field in protocol header, in bytes. */
enum mlx5_modification_field id;
};
/* HW steering flow attributes. */
struct mlx5_flow_attr {
uint32_t port_id; /* Port index. */
@@ -1081,6 +1126,29 @@ struct mlx5_action_construct_data {
/* encap data len. */
uint16_t len;
} encap;
struct {
/* Modify header action offset in pattern. */
uint16_t mhdr_cmds_off;
/* Offset in pattern after modify header actions. */
uint16_t mhdr_cmds_end;
/*
* True if this action is masked and does not need to
* be generated.
*/
bool shared;
/*
* Modified field definitions in dst field (SET, ADD)
* or src field (COPY).
*/
struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
/* Modified field definitions in dst field (COPY). */
struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
/*
* Masks applied to field values to generate
* PRM actions.
*/
uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
} modify_header;
struct {
uint64_t types; /* RSS hash types. */
uint32_t level; /* RSS level. */
@@ -1106,6 +1174,7 @@ struct rte_flow_actions_template {
struct rte_flow_actions_template_attr attr;
struct rte_flow_action *actions; /* Cached flow actions. */
struct rte_flow_action *masks; /* Cached action masks.*/
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
};
@@ -1126,6 +1195,22 @@ struct mlx5_hw_encap_decap_action {
uint8_t data[]; /* Action data. */
};
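/*
 * Leaves room for a NOP in front of every modification command, plus
 * one spare slot.
 */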
#define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
/* Modify field action struct. */
struct mlx5_hw_modify_header_action {
/* Reference to DR action */
struct mlx5dr_action *action;
/* Modify header action position in action rule table. */
uint16_t pos;
/* Is MODIFY_HEADER action shared across flows in table. */
bool shared;
/* Amount of modification commands stored in the precompiled buffer. */
uint32_t mhdr_cmds_num;
/* Precompiled modification commands. */
struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
};
/* The maximum actions support in the flow. */
#define MLX5_HW_MAX_ACTS 16
@@ -1135,6 +1220,7 @@ struct mlx5_hw_actions {
LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
struct mlx5_hw_jump_action *jump; /* Jump action. */
struct mlx5_hrxq *tir; /* TIR action. */
struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
/* Encap/Decap action. */
struct mlx5_hw_encap_decap_action *encap_decap;
uint16_t encap_decap_pos; /* Encap/Decap action position. */
@@ -2246,6 +2332,16 @@ int flow_dv_action_query(struct rte_eth_dev *dev,
size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
size_t *size, struct rte_flow_error *error);
void mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
struct field_modify_info *info, uint32_t *mask,
uint32_t width, struct rte_eth_dev *dev,
const struct rte_flow_attr *attr, struct rte_flow_error *error);
int flow_dv_convert_modify_action(struct rte_flow_item *item,
struct field_modify_info *field,
struct field_modify_info *dcopy,
struct mlx5_flow_dv_modify_hdr_resource *resource,
uint32_t type, struct rte_flow_error *error);
#define MLX5_PF_VPORT_ID 0
#define MLX5_ECPF_VPORT_ID 0xFFFE


@@ -212,12 +212,6 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
attr->valid = 1;
}
struct field_modify_info {
uint32_t size; /* Size of field in protocol header, in bytes. */
uint32_t offset; /* Offset of field in protocol header, in bytes. */
enum mlx5_modification_field id;
};
struct field_modify_info modify_eth[] = {
{4, 0, MLX5_MODI_OUT_DMAC_47_16},
{2, 4, MLX5_MODI_OUT_DMAC_15_0},
@@ -350,45 +344,6 @@ mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
}
}
/**
* Fetch 1, 2, 3 or 4 byte field from the byte array
* and return as unsigned integer in host-endian format.
*
* @param[in] data
* Pointer to data array.
* @param[in] size
* Size of field to extract.
*
* @return
* converted field in host endian format.
*/
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
uint32_t ret;
switch (size) {
case 1:
ret = *data;
break;
case 2:
ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
break;
case 3:
ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
ret = (ret << 8) | *(data + sizeof(uint16_t));
break;
case 4:
ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
break;
default:
MLX5_ASSERT(false);
ret = 0;
break;
}
return ret;
}
/**
* Convert modify-header action to DV specification.
*
@@ -417,7 +372,7 @@ flow_dv_fetch_field(const uint8_t *data, uint32_t size)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
int
flow_dv_convert_modify_action(struct rte_flow_item *item,
struct field_modify_info *field,
struct field_modify_info *dcopy,
@@ -1435,7 +1390,32 @@ mlx5_flow_item_field_width(struct rte_eth_dev *dev,
return 0;
}
static void
static __rte_always_inline uint8_t
flow_modify_info_mask_8(uint32_t length, uint32_t off)
{
return (0xffu >> (8 - length)) << off;
}
static __rte_always_inline uint16_t
flow_modify_info_mask_16(uint32_t length, uint32_t off)
{
return rte_cpu_to_be_16((0xffffu >> (16 - length)) << off);
}
static __rte_always_inline uint32_t
flow_modify_info_mask_32(uint32_t length, uint32_t off)
{
return rte_cpu_to_be_32((0xffffffffu >> (32 - length)) << off);
}
static __rte_always_inline uint32_t
flow_modify_info_mask_32_masked(uint32_t length, uint32_t off, uint32_t post_mask)
{
uint32_t mask = (0xffffffffu >> (32 - length)) << off;
return rte_cpu_to_be_32(mask & post_mask);
}
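/*
 * For example, flow_modify_info_mask_16(4, 8) builds the mask for a
 * 4-bit field at bit offset 8: (0xffffu >> 12) << 8 == 0x0f00,
 * returned in big-endian byte order.
 */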
void
mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
struct field_modify_info *info, uint32_t *mask,
@@ -1444,323 +1424,340 @@ mlx5_flow_field_id_to_modify_info
{
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t idx = 0;
uint32_t off = 0;
switch (data->field) {
uint32_t off_be = 0;
uint32_t length = 0;
switch ((int)data->field) {
case RTE_FLOW_FIELD_START:
/* not supported yet */
MLX5_ASSERT(false);
break;
case RTE_FLOW_FIELD_MAC_DST:
off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
if (data->offset < 16) {
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_DMAC_15_0};
if (width < 16) {
mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
break;
++idx;
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
mask[0] = rte_cpu_to_be_32((0xffffffff >>
(32 - width)) << off);
MLX5_ASSERT(data->offset + width <= 48);
off_be = 48 - (data->offset + width);
if (off_be < 16) {
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_DMAC_15_0};
length = off_be + width <= 16 ? width : 16 - off_be;
if (mask)
mask[1] = flow_modify_info_mask_16(length,
off_be);
else
info[idx].offset = off_be;
width -= length;
if (!width)
break;
off_be = 0;
idx++;
} else {
if (data->offset < 16)
info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_DMAC_15_0};
info[idx] = (struct field_modify_info){4, off,
MLX5_MODI_OUT_DMAC_47_16};
off_be -= 16;
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DMAC_47_16};
if (mask)
mask[0] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
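/*
 * Example for MAC_DST: data->offset = 8 and width = 16 select bits
 * 39..24 of the address, i.e. off_be = 48 - (8 + 16) = 24. Since
 * off_be >= 16, the run lands entirely in MLX5_MODI_OUT_DMAC_47_16
 * with an in-field offset of 24 - 16 = 8.
 */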
case RTE_FLOW_FIELD_MAC_SRC:
off = data->offset > 16 ? data->offset - 16 : 0;
if (mask) {
if (data->offset < 16) {
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_SMAC_15_0};
if (width < 16) {
mask[1] = rte_cpu_to_be_16(0xffff >>
(16 - width));
width = 0;
} else {
mask[1] = RTE_BE16(0xffff);
width -= 16;
}
if (!width)
break;
++idx;
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
mask[0] = rte_cpu_to_be_32((0xffffffff >>
(32 - width)) << off);
MLX5_ASSERT(data->offset + width <= 48);
off_be = 48 - (data->offset + width);
if (off_be < 16) {
info[idx] = (struct field_modify_info){2, 4,
MLX5_MODI_OUT_SMAC_15_0};
length = off_be + width <= 16 ? width : 16 - off_be;
if (mask)
mask[1] = flow_modify_info_mask_16(length,
off_be);
else
info[idx].offset = off_be;
width -= length;
if (!width)
break;
off_be = 0;
idx++;
} else {
if (data->offset < 16)
info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_SMAC_15_0};
info[idx] = (struct field_modify_info){4, off,
MLX5_MODI_OUT_SMAC_47_16};
off_be -= 16;
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SMAC_47_16};
if (mask)
mask[0] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_VLAN_TYPE:
/* not supported yet */
break;
case RTE_FLOW_FIELD_VLAN_ID:
MLX5_ASSERT(data->offset + width <= 12);
off_be = 12 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_FIRST_VID};
if (mask)
mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
mask[idx] = flow_modify_info_mask_16(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_MAC_TYPE:
MLX5_ASSERT(data->offset + width <= 16);
off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_ETHERTYPE};
if (mask)
mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
mask[idx] = flow_modify_info_mask_16(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_DSCP:
MLX5_ASSERT(data->offset + width <= 6);
off_be = 6 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
mask[idx] = 0x3f >> (6 - width);
mask[idx] = flow_modify_info_mask_8(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_TTL:
MLX5_ASSERT(data->offset + width <= 8);
off_be = 8 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV4_TTL};
if (mask)
mask[idx] = 0xff >> (8 - width);
mask[idx] = flow_modify_info_mask_8(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_SRC:
MLX5_ASSERT(data->offset + width <= 32);
off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV4};
if (mask)
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
mask[idx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_DST:
MLX5_ASSERT(data->offset + width <= 32);
off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV4};
if (mask)
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
mask[idx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV6_DSCP:
MLX5_ASSERT(data->offset + width <= 6);
off_be = 6 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
mask[idx] = 0x3f >> (6 - width);
mask[idx] = flow_modify_info_mask_8(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
MLX5_ASSERT(data->offset + width <= 8);
off_be = 8 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV6_HOPLIMIT};
if (mask)
mask[idx] = 0xff >> (8 - width);
mask[idx] = flow_modify_info_mask_8(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV6_SRC:
if (mask) {
if (data->offset < 32) {
info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_SIPV6_31_0};
if (width < 32) {
mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
case RTE_FLOW_FIELD_IPV6_SRC: {
/*
* Fields corresponding to IPv6 source address bytes
* arranged according to network byte ordering.
*/
struct field_modify_info fields[] = {
{ 4, 0, MLX5_MODI_OUT_SIPV6_127_96 },
{ 4, 4, MLX5_MODI_OUT_SIPV6_95_64 },
{ 4, 8, MLX5_MODI_OUT_SIPV6_63_32 },
{ 4, 12, MLX5_MODI_OUT_SIPV6_31_0 },
};
/* First mask to be modified is the mask of 4th address byte. */
uint32_t midx = 3;
MLX5_ASSERT(data->offset + width <= 128);
off_be = 128 - (data->offset + width);
while (width > 0 && midx > 0) {
if (off_be < 32) {
info[idx] = fields[midx];
length = off_be + width <= 32 ?
width : 32 - off_be;
if (mask)
mask[midx] = flow_modify_info_mask_32
(length, off_be);
else
info[idx].offset = off_be;
width -= length;
off_be = 0;
idx++;
} else {
off_be -= 32;
}
if (data->offset < 64) {
info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_SIPV6_63_32};
if (width < 32) {
mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
}
if (data->offset < 96) {
info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_SIPV6_95_64};
if (width < 32) {
mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_127_96};
mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_31_0};
if (data->offset < 64)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_63_32};
if (data->offset < 96)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_127_96};
midx--;
}
if (!width)
break;
info[idx] = fields[midx];
if (mask)
mask[midx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV6_DST:
if (mask) {
if (data->offset < 32) {
info[idx] = (struct field_modify_info){4, 12,
MLX5_MODI_OUT_DIPV6_31_0};
if (width < 32) {
mask[3] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
mask[3] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
}
case RTE_FLOW_FIELD_IPV6_DST: {
/*
* Fields corresponding to IPv6 destination address bytes
* arranged according to network byte ordering.
*/
struct field_modify_info fields[] = {
{ 4, 0, MLX5_MODI_OUT_DIPV6_127_96 },
{ 4, 4, MLX5_MODI_OUT_DIPV6_95_64 },
{ 4, 8, MLX5_MODI_OUT_DIPV6_63_32 },
{ 4, 12, MLX5_MODI_OUT_DIPV6_31_0 },
};
/* First mask to be modified is the mask of 4th address byte. */
uint32_t midx = 3;
MLX5_ASSERT(data->offset + width <= 128);
off_be = 128 - (data->offset + width);
while (width > 0 && midx > 0) {
if (off_be < 32) {
info[idx] = fields[midx];
length = off_be + width <= 32 ?
width : 32 - off_be;
if (mask)
mask[midx] = flow_modify_info_mask_32
(length, off_be);
else
info[idx].offset = off_be;
width -= length;
off_be = 0;
idx++;
} else {
off_be -= 32;
}
if (data->offset < 64) {
info[idx] = (struct field_modify_info){4, 8,
MLX5_MODI_OUT_DIPV6_63_32};
if (width < 32) {
mask[2] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
mask[2] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
}
if (data->offset < 96) {
info[idx] = (struct field_modify_info){4, 4,
MLX5_MODI_OUT_DIPV6_95_64};
if (width < 32) {
mask[1] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
width = 0;
} else {
mask[1] = RTE_BE32(0xffffffff);
width -= 32;
}
if (!width)
break;
++idx;
}
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_127_96};
mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_31_0};
if (data->offset < 64)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_63_32};
if (data->offset < 96)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_127_96};
midx--;
}
if (!width)
break;
info[idx] = fields[midx];
if (mask)
mask[midx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
}
case RTE_FLOW_FIELD_TCP_PORT_SRC:
MLX5_ASSERT(data->offset + width <= 16);
off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_SPORT};
if (mask)
mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
mask[idx] = flow_modify_info_mask_16(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_PORT_DST:
MLX5_ASSERT(data->offset + width <= 16);
off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_DPORT};
if (mask)
mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
mask[idx] = flow_modify_info_mask_16(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_SEQ_NUM:
MLX5_ASSERT(data->offset + width <= 32);
off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_TCP_SEQ_NUM};
if (mask)
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
mask[idx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_ACK_NUM:
MLX5_ASSERT(data->offset + width <= 32);
off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_TCP_ACK_NUM};
if (mask)
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
mask[idx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_FLAGS:
MLX5_ASSERT(data->offset + width <= 9);
off_be = 9 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_FLAGS};
if (mask)
mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
mask[idx] = flow_modify_info_mask_16(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_UDP_PORT_SRC:
MLX5_ASSERT(data->offset + width <= 16);
off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_SPORT};
if (mask)
mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
mask[idx] = flow_modify_info_mask_16(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_UDP_PORT_DST:
MLX5_ASSERT(data->offset + width <= 16);
off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_DPORT};
if (mask)
mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
mask[idx] = flow_modify_info_mask_16(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_VXLAN_VNI:
/* not supported yet */
MLX5_ASSERT(data->offset + width <= 24);
/* VNI is on bits 31-8 of TUNNEL_HDR_DW_1. */
off_be = 24 - (data->offset + width) + 8;
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_TUNNEL_HDR_DW_1};
if (mask)
mask[idx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_GENEVE_VNI:
/* not supported yet */
break;
case RTE_FLOW_FIELD_GTP_TEID:
MLX5_ASSERT(data->offset + width <= 32);
off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_GTP_TEID};
if (mask)
mask[idx] = rte_cpu_to_be_32(0xffffffff >>
(32 - width));
mask[idx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TAG:
{
int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
data->level, error);
MLX5_ASSERT(data->offset + width <= 32);
int reg;
if (priv->sh->config.dv_flow_en == 2)
reg = REG_C_1;
else
reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
data->level, error);
if (reg < 0)
return;
MLX5_ASSERT(reg != REG_NON);
@@ -1768,15 +1765,18 @@ mlx5_flow_field_id_to_modify_info
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
mask[idx] =
rte_cpu_to_be_32(0xffffffff >>
(32 - width));
mask[idx] = flow_modify_info_mask_32
(width, data->offset);
else
info[idx].offset = data->offset;
}
break;
case RTE_FLOW_FIELD_MARK:
{
uint32_t mark_mask = priv->sh->dv_mark_mask;
uint32_t mark_count = __builtin_popcount(mark_mask);
RTE_SET_USED(mark_count);
MLX5_ASSERT(data->offset + width <= mark_count);
int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
0, error);
if (reg < 0)
@@ -1786,14 +1786,18 @@ mlx5_flow_field_id_to_modify_info
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
mask[idx] = rte_cpu_to_be_32((mark_mask >>
(mark_count - width)) & mark_mask);
mask[idx] = flow_modify_info_mask_32_masked
(width, data->offset, mark_mask);
else
info[idx].offset = data->offset;
}
break;
case RTE_FLOW_FIELD_META:
{
uint32_t meta_mask = priv->sh->dv_meta_mask;
uint32_t meta_count = __builtin_popcount(meta_mask);
RTE_SET_USED(meta_count);
MLX5_ASSERT(data->offset + width <= meta_count);
int reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return;
@@ -1802,16 +1806,32 @@ mlx5_flow_field_id_to_modify_info
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
mask[idx] = rte_cpu_to_be_32((meta_mask >>
(meta_count - width)) & meta_mask);
mask[idx] = flow_modify_info_mask_32_masked
(width, data->offset, meta_mask);
else
info[idx].offset = data->offset;
}
break;
case RTE_FLOW_FIELD_IPV4_ECN:
case RTE_FLOW_FIELD_IPV6_ECN:
MLX5_ASSERT(data->offset + width <= 2);
off_be = 2 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_ECN};
if (mask)
mask[idx] = 0x3 >> (2 - width);
mask[idx] = flow_modify_info_mask_8(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_GTP_PSC_QFI:
MLX5_ASSERT(data->offset + width <= 8);
off_be = data->offset + 8;
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_GTPU_FIRST_EXT_DW_0};
if (mask)
mask[idx] = flow_modify_info_mask_32(width, off_be);
else
info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
@@ -1861,7 +1881,8 @@ flow_dv_convert_action_modify_field
if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
conf->src.field == RTE_FLOW_FIELD_VALUE) {
type = MLX5_MODIFICATION_TYPE_SET;
type = conf->operation == RTE_FLOW_MODIFY_SET ?
MLX5_MODIFICATION_TYPE_SET : MLX5_MODIFICATION_TYPE_ADD;
/** For SET/ADD fill the destination field (field) first. */
mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
conf->width, dev,


@@ -319,6 +319,11 @@ __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
acts->jump = NULL;
}
if (acts->mhdr) {
if (acts->mhdr->action)
mlx5dr_action_destroy(acts->mhdr->action);
mlx5_free(acts->mhdr);
}
}
/**
@@ -425,6 +430,37 @@ __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
return 0;
}
static __rte_always_inline int
__flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
struct mlx5_hw_actions *acts,
enum rte_flow_action_type type,
uint16_t action_src,
uint16_t action_dst,
uint16_t mhdr_cmds_off,
uint16_t mhdr_cmds_end,
bool shared,
struct field_modify_info *field,
struct field_modify_info *dcopy,
uint32_t *mask)
{
struct mlx5_action_construct_data *act_data;
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
return -1;
act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
act_data->modify_header.shared = shared;
rte_memcpy(act_data->modify_header.field, field,
sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
rte_memcpy(act_data->modify_header.dcopy, dcopy,
sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
rte_memcpy(act_data->modify_header.mask, mask,
sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
LIST_INSERT_HEAD(&acts->act_list, act_data, next);
return 0;
}
/**
* Append shared RSS action to the dynamic action list.
*
@@ -515,6 +551,265 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,
return 0;
}
static __rte_always_inline bool
flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
const struct rte_flow_action *mask)
{
const struct rte_flow_action_modify_field *v = action->conf;
const struct rte_flow_action_modify_field *m = mask->conf;
if (v->src.field == RTE_FLOW_FIELD_VALUE) {
uint32_t j;
if (m == NULL)
return false;
for (j = 0; j < RTE_DIM(m->src.value); ++j) {
/*
* Immediate value is considered to be masked
* (and thus shared by all flow rules), if mask
* is non-zero. Partial mask over immediate value
* is not allowed.
*/
if (m->src.value[j])
return true;
}
return false;
}
if (v->src.field == RTE_FLOW_FIELD_POINTER)
return m->src.pvalue != NULL;
/*
* Source field types other than VALUE and
* POINTER are always shared.
*/
return true;
}
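/*
 * A fully masked (shared) modify_field action is precompiled once into
 * the template's modify header action; a non-shared one leaves its
 * data words to be patched per rule at enqueue time.
 */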
static __rte_always_inline bool
flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
const struct mlx5_modification_cmd *cmd)
{
struct mlx5_modification_cmd last_cmd = { { 0 } };
struct mlx5_modification_cmd new_cmd = { { 0 } };
const uint32_t cmds_num = mhdr->mhdr_cmds_num;
unsigned int last_type;
bool should_insert = false;
if (cmds_num == 0)
return false;
last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
last_type = last_cmd.action_type;
new_cmd = *cmd;
new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
switch (new_cmd.action_type) {
case MLX5_MODIFICATION_TYPE_SET:
case MLX5_MODIFICATION_TYPE_ADD:
if (last_type == MLX5_MODIFICATION_TYPE_SET ||
last_type == MLX5_MODIFICATION_TYPE_ADD)
should_insert = new_cmd.field == last_cmd.field;
else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
should_insert = new_cmd.field == last_cmd.dst_field;
else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
should_insert = false;
else
MLX5_ASSERT(false); /* Other types are not supported. */
break;
case MLX5_MODIFICATION_TYPE_COPY:
if (last_type == MLX5_MODIFICATION_TYPE_SET ||
last_type == MLX5_MODIFICATION_TYPE_ADD)
should_insert = (new_cmd.field == last_cmd.field ||
new_cmd.dst_field == last_cmd.field);
else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
should_insert = (new_cmd.field == last_cmd.dst_field ||
new_cmd.dst_field == last_cmd.dst_field);
else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
should_insert = false;
else
MLX5_ASSERT(false); /* Other types are not supported. */
break;
default:
/* Other action types should be rejected on AT validation. */
MLX5_ASSERT(false);
break;
}
return should_insert;
}
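/*
 * Two consecutive modification commands must not touch the same field;
 * when a new command would collide with the previous one, a NOP is
 * inserted between them to break the dependency.
 */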
static __rte_always_inline int
flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
{
struct mlx5_modification_cmd *nop;
uint32_t num = mhdr->mhdr_cmds_num;
if (num + 1 >= MLX5_MHDR_MAX_CMD)
return -ENOMEM;
nop = mhdr->mhdr_cmds + num;
nop->data0 = 0;
nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
nop->data0 = rte_cpu_to_be_32(nop->data0);
nop->data1 = 0;
mhdr->mhdr_cmds_num = num + 1;
return 0;
}
static __rte_always_inline int
flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
struct mlx5_modification_cmd *cmd)
{
uint32_t num = mhdr->mhdr_cmds_num;
if (num + 1 >= MLX5_MHDR_MAX_CMD)
return -ENOMEM;
mhdr->mhdr_cmds[num] = *cmd;
mhdr->mhdr_cmds_num = num + 1;
return 0;
}
static __rte_always_inline int
flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
struct mlx5_flow_dv_modify_hdr_resource *resource)
{
uint32_t idx;
int ret;
for (idx = 0; idx < resource->actions_num; ++idx) {
struct mlx5_modification_cmd *src = &resource->actions[idx];
if (flow_hw_should_insert_nop(mhdr, src)) {
ret = flow_hw_mhdr_cmd_nop_append(mhdr);
if (ret)
return ret;
}
ret = flow_hw_mhdr_cmd_append(mhdr, src);
if (ret)
return ret;
}
return 0;
}
static __rte_always_inline void
flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
struct rte_flow_actions_template *at)
{
memset(mhdr, 0, sizeof(*mhdr));
/* Modify header action without any commands is shared by default. */
mhdr->shared = true;
mhdr->pos = at->mhdr_off;
}
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_action *action_start, /* Start of AT actions. */
const struct rte_flow_action *action, /* Current action from AT. */
const struct rte_flow_action *action_mask, /* Current mask from AT. */
struct mlx5_hw_actions *acts,
struct mlx5_hw_modify_header_action *mhdr,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_modify_field *conf = action->conf;
union {
struct mlx5_flow_dv_modify_hdr_resource resource;
uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
} dummy;
struct mlx5_flow_dv_modify_hdr_resource *resource;
struct rte_flow_item item = {
.spec = NULL,
.mask = NULL
};
struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
{0, 0, MLX5_MODI_OUT_NONE} };
struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
{0, 0, MLX5_MODI_OUT_NONE} };
uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
uint32_t type, value = 0;
uint16_t cmds_start, cmds_end;
bool shared;
int ret;
/*
* Modify header action is shared if previous modify_field actions
* are shared and currently compiled action is shared.
*/
shared = flow_hw_action_modify_field_is_shared(action, action_mask);
mhdr->shared &= shared;
if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
conf->src.field == RTE_FLOW_FIELD_VALUE) {
type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
MLX5_MODIFICATION_TYPE_ADD;
/* For SET/ADD fill the destination field (field) first. */
mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
conf->width, dev,
attr, error);
item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
(void *)(uintptr_t)conf->src.pvalue :
(void *)(uintptr_t)&conf->src.value;
if (conf->dst.field == RTE_FLOW_FIELD_META ||
conf->dst.field == RTE_FLOW_FIELD_TAG) {
value = *(const unaligned_uint32_t *)item.spec;
value = rte_cpu_to_be_32(value);
item.spec = &value;
} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
/*
* QFI is passed as a uint8_t integer, but it is accessed through
* the 2nd least significant byte of a 32-bit field in the modify header command.
*/
value = *(const uint8_t *)item.spec;
value = rte_cpu_to_be_32(value << 8);
item.spec = &value;
}
} else {
type = MLX5_MODIFICATION_TYPE_COPY;
/* For COPY fill the destination field (dcopy) without mask. */
mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
conf->width, dev,
attr, error);
/* Then construct the source field (field) with mask. */
mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
conf->width, dev,
attr, error);
}
item.mask = &mask;
memset(&dummy, 0, sizeof(dummy));
resource = &dummy.resource;
ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
if (ret)
return ret;
MLX5_ASSERT(resource->actions_num > 0);
/*
* If previous modify field action collide with this one, then insert NOP command.
* This NOP command will not be a part of action's command range used to update commands
* on rule creation.
*/
if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
ret = flow_hw_mhdr_cmd_nop_append(mhdr);
if (ret)
return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "too many modify field operations specified");
}
cmds_start = mhdr->mhdr_cmds_num;
ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
if (ret)
return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "too many modify field operations specified");
cmds_end = mhdr->mhdr_cmds_num;
if (shared)
return 0;
ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
action - action_start, mhdr->pos,
cmds_start, cmds_end, shared,
field, dcopy, mask);
if (ret)
return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "not enough memory to store modify field metadata");
return 0;
}
/**
* Translate rte_flow actions to DR action.
*
@@ -558,10 +853,12 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t reformat_pos = MLX5_HW_MAX_ACTS, reformat_src = 0;
uint8_t *encap_data = NULL, *encap_data_m = NULL;
size_t data_size = 0;
struct mlx5_hw_modify_header_action mhdr = { 0 };
bool actions_end = false;
uint32_t type, i;
int err;
flow_hw_modify_field_init(&mhdr, at);
if (attr->transfer)
type = MLX5DR_TABLE_TYPE_FDB;
else if (attr->egress)
@@ -717,6 +1014,15 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
DRV_LOG(ERR, "send to kernel action is not supported in HW steering.");
goto err;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
if (mhdr.pos == UINT16_MAX)
mhdr.pos = i++;
err = flow_hw_modify_field_compile(dev, attr, action_start,
actions, masks, acts, &mhdr,
error);
if (err)
goto err;
break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -724,6 +1030,31 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
break;
}
}
if (mhdr.pos != UINT16_MAX) {
uint32_t flags;
uint32_t bulk_size;
size_t mhdr_len;
acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
0, SOCKET_ID_ANY);
if (!acts->mhdr)
goto err;
rte_memcpy(acts->mhdr, &mhdr, sizeof(*acts->mhdr));
mhdr_len = sizeof(struct mlx5_modification_cmd) * acts->mhdr->mhdr_cmds_num;
flags = mlx5_hw_act_flag[!!attr->group][type];
if (acts->mhdr->shared) {
flags |= MLX5DR_ACTION_FLAG_SHARED;
bulk_size = 0;
} else {
bulk_size = rte_log2_u32(table_attr->nb_flows);
}
acts->mhdr->action = mlx5dr_action_create_modify_header
(priv->dr_ctx, mhdr_len, (__be64 *)acts->mhdr->mhdr_cmds,
bulk_size, flags);
if (!acts->mhdr->action)
goto err;
acts->rule_acts[acts->mhdr->pos].action = acts->mhdr->action;
}
if (reformat_pos != MLX5_HW_MAX_ACTS) {
uint8_t buf[MLX5_ENCAP_MAX_LEN];
bool shared_rfmt = true;
@@ -887,6 +1218,110 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev,
return 0;
}
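/*
 * Check whether a modification command is a NOP. The command's data0
 * word is stored big-endian, so it is converted before the action
 * type bitfield is read.
 */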
static __rte_always_inline int
flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
{
struct mlx5_modification_cmd cmd_he = {
.data0 = rte_be_to_cpu_32(cmd->data0),
.data1 = 0,
};
return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
}
/**
 * Construct the modify header commands for a flow rule.
 *
 * Immediate values of non-shared modify_field actions are copied into
 * the job's command buffer, using the masks precompiled when the
 * actions template was translated.
 *
 * @param[in] job
 * Pointer to job descriptor.
 * @param[in] act_data
 * Pointer to the action construct data.
 * @param[in] hw_acts
 * Pointer to translated actions from template.
 * @param[in] action
 * Pointer to the modify_field action from the flow rule.
 *
 * @return
 * 0 on success, negative value otherwise.
 */
static __rte_always_inline int
flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
struct mlx5_action_construct_data *act_data,
const struct mlx5_hw_actions *hw_acts,
const struct rte_flow_action *action)
{
const struct rte_flow_action_modify_field *mhdr_action = action->conf;
uint8_t values[16] = { 0 };
unaligned_uint32_t *value_p;
uint32_t i;
struct field_modify_info *field;
if (!hw_acts->mhdr)
return -1;
if (hw_acts->mhdr->shared || act_data->modify_header.shared)
return 0;
MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
return 0;
if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
else
rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
mhdr_action->dst.field == RTE_FLOW_FIELD_TAG) {
value_p = (unaligned_uint32_t *)values;
*value_p = rte_cpu_to_be_32(*value_p);
} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
uint32_t tmp;
/*
* QFI is passed as a uint8_t integer, but it is accessed through
* the 2nd least significant byte of a 32-bit field in the modify header command.
*/
tmp = values[0];
value_p = (unaligned_uint32_t *)values;
*value_p = rte_cpu_to_be_32(tmp << 8);
}
i = act_data->modify_header.mhdr_cmds_off;
field = act_data->modify_header.field;
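/*
 * Walk this action's command range: NOP commands are skipped, and
 * each remaining command takes its value from the next field with a
 * non-zero mask.
 */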
do {
uint32_t off_b;
uint32_t mask;
uint32_t data;
const uint8_t *mask_src;
if (i >= act_data->modify_header.mhdr_cmds_end)
return -1;
if (flow_hw_mhdr_cmd_is_nop(&job->mhdr_cmd[i])) {
++i;
continue;
}
mask_src = (const uint8_t *)act_data->modify_header.mask;
mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
if (!mask) {
++field;
continue;
}
off_b = rte_bsf32(mask);
data = flow_dv_fetch_field(values + field->offset, field->size);
data = (data & mask) >> off_b;
job->mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
++field;
} while (field->size);
return 0;
}
/**
* Construct flow action array.
*
@@ -931,6 +1366,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
};
uint32_t ft_flag;
size_t encap_len = 0;
int ret;
memcpy(rule_acts, hw_acts->rule_acts,
sizeof(*rule_acts) * hw_acts->acts_num);
@@ -948,6 +1384,18 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
} else {
attr.ingress = 1;
}
if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0) {
uint16_t pos = hw_acts->mhdr->pos;
if (!hw_acts->mhdr->shared) {
rule_acts[pos].modify_header.offset =
job->flow->idx - 1;
rule_acts[pos].modify_header.data =
(uint8_t *)job->mhdr_cmd;
rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
}
}
LIST_FOREACH(act_data, &hw_acts->act_list, next) {
uint32_t jump_group;
uint32_t tag;
@@ -1023,6 +1471,14 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(raw_encap_data->size ==
act_data->encap.len);
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_hw_modify_field_construct(job,
act_data,
hw_acts,
action);
if (ret)
return -1;
break;
default:
break;
}
@@ -1612,6 +2068,155 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
return 0;
}
static bool
flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
enum rte_flow_field_id field)
{
return action->src.field == field || action->dst.field == field;
}
static int
flow_hw_validate_action_modify_field(const struct rte_flow_action *action,
const struct rte_flow_action *mask,
struct rte_flow_error *error)
{
const struct rte_flow_action_modify_field *action_conf =
action->conf;
const struct rte_flow_action_modify_field *mask_conf =
mask->conf;
if (action_conf->operation != mask_conf->operation)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"modify_field operation mask and template are not equal");
if (action_conf->dst.field != mask_conf->dst.field)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"destination field mask and template are not equal");
if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
action_conf->dst.field == RTE_FLOW_FIELD_VALUE)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"immediate value and pointer cannot be used as destination");
if (mask_conf->dst.level != UINT32_MAX)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"destination encapsulation level must be fully masked");
if (mask_conf->dst.offset != UINT32_MAX)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"destination offset level must be fully masked");
if (action_conf->src.field != mask_conf->src.field)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"destination field mask and template are not equal");
if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
if (mask_conf->src.level != UINT32_MAX)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"source encapsulation level must be fully masked");
if (mask_conf->src.offset != UINT32_MAX)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"source offset level must be fully masked");
}
if (mask_conf->width != UINT32_MAX)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"modify_field width field must be fully masked");
if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifying arbitrary place in a packet is not supported");
if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifying vlan_type is not supported");
if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifying Geneve VNI is not supported");
return 0;
}
static int
flow_hw_action_validate(const struct rte_flow_action actions[],
const struct rte_flow_action masks[],
struct rte_flow_error *error)
{
int i;
bool actions_end = false;
int ret;
for (i = 0; !actions_end; ++i) {
const struct rte_flow_action *action = &actions[i];
const struct rte_flow_action *mask = &masks[i];
if (action->type != mask->type)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
action,
"mask type does not match action type");
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_MARK:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_DROP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_RSS:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
/* TODO: Validation logic */
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_hw_validate_action_modify_field(action,
mask,
error);
if (ret < 0)
return ret;
break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
action,
"action not supported in template API");
}
}
return 0;
}
/**
* Create flow action template.
*
@@ -1640,6 +2245,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
int len, act_len, mask_len, i;
struct rte_flow_actions_template *at;
if (flow_hw_action_validate(actions, masks, error))
return NULL;
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
NULL, 0, actions, error);
if (act_len <= 0)
@@ -2096,6 +2703,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
mem_size += (sizeof(struct mlx5_hw_q_job *) +
sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
sizeof(struct mlx5_modification_cmd) *
MLX5_MHDR_MAX_CMD +
sizeof(struct mlx5_hw_q_job)) *
queue_attr[0]->size;
}
@@ -2107,6 +2716,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
for (i = 0; i < nb_queue; i++) {
uint8_t *encap = NULL;
struct mlx5_modification_cmd *mhdr_cmd = NULL;
priv->hw_q[i].job_idx = queue_attr[i]->size;
priv->hw_q[i].size = queue_attr[i]->size;
@@ -2118,8 +2728,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
&job[queue_attr[i - 1]->size];
job = (struct mlx5_hw_q_job *)
&priv->hw_q[i].job[queue_attr[i]->size];
encap = (uint8_t *)&job[queue_attr[i]->size];
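/*
 * Per-queue buffer layout: the job array is followed by
 * size * MLX5_MHDR_MAX_CMD modification commands, which are followed
 * by size * MLX5_ENCAP_MAX_LEN bytes of encap data.
 */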
mhdr_cmd = (struct mlx5_modification_cmd *)&job[queue_attr[i]->size];
encap = (uint8_t *)&mhdr_cmd[queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
for (j = 0; j < queue_attr[i]->size; j++) {
job[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];
job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
priv->hw_q[i].job[j] = &job[j];
}