net/mlx5: remove flags setting from flow preparation

Even though flow_drv_prepare() takes item_flags and action_flags to be
filled in, they are never used afterwards and are overwritten by the
parsing done in flow_drv_translate(). There is no reason to keep the flags
or to fill them in. Appropriate notes are added to the documentation of
flow_drv_prepare() and flow_drv_translate().

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Ori Kam <orika@mellanox.com>
commit c1cfb132fa (parent 9ce75f5122)
Author: Yongseok Koh <yskoh@mellanox.com>, 2018-11-05 07:20:47 +00:00
Committer: Ferruh Yigit
5 files changed, 37 insertions(+), 113 deletions(-)
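At its core the patch is a prototype narrowing. Side by side, taken from the mlx5_flow.h hunk below:

	/* Before: prepare had to report what it parsed. */
	typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
		(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
		 const struct rte_flow_action actions[], uint64_t *item_flags,
		 uint64_t *action_flags, struct rte_flow_error *error);

	/* After: the parsing results are derived in translate instead. */
	typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
		(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
		 const struct rte_flow_action actions[], struct rte_flow_error *error);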

--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c

@@ -1663,8 +1663,6 @@ static struct mlx5_flow *
 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[] __rte_unused,
		   const struct rte_flow_action actions[] __rte_unused,
-		   uint64_t *item_flags __rte_unused,
-		   uint64_t *action_flags __rte_unused,
		   struct rte_flow_error *error __rte_unused)
 {
	rte_errno = ENOTSUP;
@@ -1792,16 +1790,19 @@ flow_drv_validate(struct rte_eth_dev *dev,
  * calculates the size of memory required for device flow, allocates the memory,
  * initializes the device flow and returns the pointer.
  *
+ * @note
+ * This function initializes the device flow structure such as dv, tcf or verbs
+ * in struct mlx5_flow. However, it is the caller's responsibility to initialize
+ * the rest. For example, adding the returned device flow to the flow->dev_flows
+ * list and setting a backward reference to the flow should be done outside of
+ * this function. The layers field is not filled either.
+ *
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
  *   Pointer to the list of items.
  * @param[in] actions
  *   Pointer to the list of actions.
- * @param[out] item_flags
- *   Pointer to bit mask of all items detected.
- * @param[out] action_flags
- *   Pointer to bit mask of all actions detected.
  * @param[out] error
  *   Pointer to the error structure.
  *
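The caller-side responsibility the new note describes looks like this in flow_list_create(), later in this file:

	dev_flow->flow = flow;	/* backward reference, set by the caller */
	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);	/* list linkage */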
@@ -1809,12 +1810,10 @@ flow_drv_validate(struct rte_eth_dev *dev,
  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
  */
 static inline struct mlx5_flow *
-flow_drv_prepare(struct rte_flow *flow,
+flow_drv_prepare(const struct rte_flow *flow,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
-		  uint64_t *item_flags,
-		  uint64_t *action_flags,
		  struct rte_flow_error *error)
 {
	const struct mlx5_flow_driver_ops *fops;
@@ -1822,8 +1821,7 @@ flow_drv_prepare(struct rte_flow *flow,
	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
	fops = flow_get_drv_ops(type);
-	return fops->prepare(attr, items, actions, item_flags, action_flags,
-			     error);
+	return fops->prepare(attr, items, actions, error);
 }

 /**
@@ -1832,6 +1830,12 @@ flow_drv_prepare(struct rte_flow *flow,
  * translates a generic flow into a driver flow. flow_drv_prepare() must
  * precede.
  *
+ * @note
+ * dev_flow->layers could be filled as a result of parsing during translation
+ * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
+ * if necessary. As a flow can have multiple dev_flows due to RSS flow
+ * expansion, flow->actions may be overwritten even though all the expanded
+ * dev_flows have the same actions.
  *
  * @param[in] dev
  *   Pointer to the rte dev structure.
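A minimal sketch of what the note describes, assuming a driver translate callback that walks the pattern itself (illustrative, not the literal driver code):

	uint64_t item_flags = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		if (items->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
		/* ... and so on for the other item types ... */
	}
	dev_flow->layers = item_flags;           /* parsed during translation */
	dev_flow->flow->actions |= action_flags; /* identical for every expanded dev_flow */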
@@ -1895,7 +1899,7 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
  * Flow driver remove API. This abstracts calling driver specific functions.
  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
  * on device. All the resources of the flow should be freed by calling
- * flow_dv_destroy().
+ * flow_drv_destroy().
  *
  * @param[in] dev
  *   Pointer to Ethernet device.
@@ -2026,8 +2030,6 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 {
	struct rte_flow *flow = NULL;
	struct mlx5_flow *dev_flow;
-	uint64_t action_flags = 0;
-	uint64_t item_flags = 0;
	const struct rte_flow_action_rss *rss;
	union {
		struct rte_flow_expand_rss buf;
@@ -2070,16 +2072,10 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
	}
	for (i = 0; i < buf->entries; ++i) {
		dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
-					    actions, &item_flags, &action_flags,
-					    error);
+					    actions, error);
		if (!dev_flow)
			goto error;
		dev_flow->flow = flow;
-		dev_flow->layers = item_flags;
-		/* Store actions once as expanded flows have same actions. */
-		if (i == 0)
-			flow->actions = action_flags;
-		assert(flow->actions == action_flags);
		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
		ret = flow_drv_translate(dev, dev_flow, attr,
					 buf->entry[i].pattern,
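With the flags gone, the creation loop is pure plumbing. The division of labor after this patch, summarizing the notes above:

	/*
	 * prepare:   size the device flow from items/actions and allocate it.
	 * caller:    set dev_flow->flow and link dev_flow into flow->dev_flows.
	 * translate: parse items/actions, filling dev_flow->layers and
	 *            flow->actions as needed by flow_drv_apply().
	 * apply:     program the flow into the device.
	 */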

--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h

@@ -302,8 +302,7 @@ typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
				     struct rte_flow_error *error);
 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
	(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
-	 const struct rte_flow_action actions[], uint64_t *item_flags,
-	 uint64_t *action_flags, struct rte_flow_error *error);
+	 const struct rte_flow_action actions[], struct rte_flow_error *error);
 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
				      struct mlx5_flow *dev_flow,
				      const struct rte_flow_attr *attr,
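For context, the narrowed prototype plugs into the per-driver ops table; a sketch, assuming the field names follow the typedefs above (the table itself is not part of this patch):

	static const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
		.validate = flow_verbs_validate,
		.prepare = flow_verbs_prepare,
		.translate = flow_verbs_translate,
		/* apply, remove, destroy, ... */
	};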

--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c

@@ -1014,10 +1014,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
  *   Pointer to the list of items.
  * @param[in] actions
  *   Pointer to the list of actions.
- * @param[out] item_flags
- *   Pointer to bit mask of all items detected.
- * @param[out] action_flags
- *   Pointer to bit mask of all actions detected.
  * @param[out] error
  *   Pointer to the error structure.
  *
@@ -1029,8 +1025,6 @@ static struct mlx5_flow *
 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
		 const struct rte_flow_item items[] __rte_unused,
		 const struct rte_flow_action actions[] __rte_unused,
-		 uint64_t *item_flags __rte_unused,
-		 uint64_t *action_flags __rte_unused,
		 struct rte_flow_error *error)
 {
	uint32_t size = sizeof(struct mlx5_flow);
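For reference, flow_dv_prepare() now reduces to a fixed-size allocation; a sketch of how the rest of the function plausibly continues (illustrative only, the remainder of the body is outside this hunk):

	dev_flow = rte_calloc(__func__, 1, size, 0);
	if (!dev_flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow");
		return NULL;
	}
	return dev_flow;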

--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c

@@ -2370,24 +2370,21 @@ flow_tcf_validate(struct rte_eth_dev *dev,
 }

 /**
- * Calculate maximum size of memory for flow items of Linux TC flower and
- * extract specified items.
+ * Calculate maximum size of memory for flow items of Linux TC flower.
  *
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
  *   Pointer to the list of items.
- * @param[out] item_flags
- *   Pointer to the detected items.
  *
  * @return
  *   Maximum size of memory for items.
  */
 static int
-flow_tcf_get_items_and_size(const struct rte_flow_attr *attr,
-			    const struct rte_flow_item items[],
-			    uint64_t *item_flags)
+flow_tcf_get_items_size(const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[])
 {
	int size = 0;
-	uint64_t flags = 0;

	size += SZ_NLATTR_STRZ_OF("flower") +
		SZ_NLATTR_NEST + /* TCA_OPTIONS. */
@@ -2404,7 +2401,6 @@ flow_tcf_get_items_and_size(const struct rte_flow_attr *attr,
			size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
				SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;
				/* dst/src MAC addr and mask. */
-			flags |= MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
@@ -2412,37 +2408,31 @@ flow_tcf_get_items_and_size(const struct rte_flow_attr *attr,
				/* VLAN Ether type. */
				SZ_NLATTR_TYPE_OF(uint8_t) + /* VLAN prio. */
				SZ_NLATTR_TYPE_OF(uint16_t); /* VLAN ID. */
-			flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
				SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
				SZ_NLATTR_TYPE_OF(uint32_t) * 4;
				/* dst/src IP addr and mask. */
-			flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
				SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
				SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 4;
				/* dst/src IP addr and mask. */
-			flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
				SZ_NLATTR_TYPE_OF(uint16_t) * 4;
				/* dst/src port and mask. */
-			flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
				SZ_NLATTR_TYPE_OF(uint16_t) * 4;
				/* dst/src port and mask. */
-			flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += SZ_NLATTR_TYPE_OF(uint32_t);
-			flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		default:
			DRV_LOG(WARNING,
@@ -2452,7 +2442,6 @@ flow_tcf_get_items_and_size(const struct rte_flow_attr *attr,
			break;
		}
	}
-	*item_flags = flags;
	return size;
 }
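A worked example of the accumulator above (illustrative; the SZ_NLATTR_* macros are the netlink-attribute size helpers defined elsewhere in this file):

	/* For an ETH / IPV4 / UDP pattern the function now returns only a
	 * size, with no flag side effects:
	 *
	 *   size = base ("flower" string, TCA_OPTIONS nest, ...)
	 *        + u16 ether type + 4 * ETHER_ADDR_LEN attrs    (ETH)
	 *        + u16 ether type + u8 IP proto + 4 * u32 attrs (IPV4)
	 *        + u8 IP proto + 4 * u16 attrs                  (UDP)
	 */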
@@ -2668,10 +2657,6 @@ flow_tcf_nl_brand(struct nlmsghdr *nlh, uint32_t handle)
  *   Pointer to the list of items.
  * @param[in] actions
  *   Pointer to the list of actions.
- * @param[out] item_flags
- *   Pointer to bit mask of all items detected.
- * @param[out] action_flags
- *   Pointer to bit mask of all actions detected.
  * @param[out] error
  *   Pointer to the error structure.
  *
@@ -2683,7 +2668,6 @@ static struct mlx5_flow *
 flow_tcf_prepare(const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
-		  uint64_t *item_flags, uint64_t *action_flags,
		  struct rte_flow_error *error)
 {
	size_t size = RTE_ALIGN_CEIL
@@ -2692,12 +2676,13 @@ flow_tcf_prepare(const struct rte_flow_attr *attr,
		 MNL_ALIGN(sizeof(struct nlmsghdr)) +
		 MNL_ALIGN(sizeof(struct tcmsg));
	struct mlx5_flow *dev_flow;
+	uint64_t action_flags = 0;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	uint8_t *sp, *tun = NULL;

-	size += flow_tcf_get_items_and_size(attr, items, item_flags);
-	size += flow_tcf_get_actions_and_size(actions, action_flags);
+	size += flow_tcf_get_items_size(attr, items);
+	size += flow_tcf_get_actions_and_size(actions, &action_flags);
	dev_flow = rte_zmalloc(__func__, size, MNL_ALIGNTO);
	if (!dev_flow) {
		rte_flow_error_set(error, ENOMEM,
@@ -2706,7 +2691,7 @@ flow_tcf_prepare(const struct rte_flow_attr *attr,
		return NULL;
	}
	sp = (uint8_t *)(dev_flow + 1);
-	if (*action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) {
+	if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) {
		sp = RTE_PTR_ALIGN
			(sp, alignof(struct flow_tcf_tunnel_hdr));
		tun = sp;
@@ -2718,7 +2703,7 @@ flow_tcf_prepare(const struct rte_flow_attr *attr,
			(sizeof(struct flow_tcf_vxlan_encap),
			 MNL_ALIGNTO);
 #endif
-	} else if (*action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) {
+	} else if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) {
		sp = RTE_PTR_ALIGN
			(sp, alignof(struct flow_tcf_tunnel_hdr));
		tun = sp;
@@ -2747,9 +2732,9 @@ flow_tcf_prepare(const struct rte_flow_attr *attr,
			.tcm = tcm,
		},
	};
-	if (*action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP)
+	if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP)
		dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_DECAP;
-	else if (*action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP)
+	else if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP)
		dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_ENCAP;
	/*
	 * Generate a reasonably unique handle based on the address of the
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c

@@ -1209,23 +1209,18 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 /**
  * Calculate the required bytes that are needed for the action part of the verbs
- * flow, in addtion returns bit-fields with all the detected action, in order to
- * avoid another interation over the actions.
+ * flow.
  *
  * @param[in] actions
  *   Pointer to the list of actions.
- * @param[out] action_flags
- *   Pointer to the detected actions.
  *
  * @return
  *   The size of the memory needed for all actions.
  */
 static int
-flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
-				uint64_t *action_flags)
+flow_verbs_get_actions_size(const struct rte_flow_action actions[])
 {
	int size = 0;
-	uint64_t detected_actions = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
@@ -1233,128 +1228,89 @@ flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
-			detected_actions |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
-			detected_actions |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
-			detected_actions |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
-			detected_actions |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
-			detected_actions |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
 #endif
-			detected_actions |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			break;
		}
	}
-	*action_flags = detected_actions;
	return size;
 }
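A quick usage sketch (the action list and its confs are hypothetical; the sizes follow the switch above):

	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },   /* adds a tag spec */
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue }, /* adds no verbs spec */
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	int size = flow_verbs_get_actions_size(actions);
	/* size == sizeof(struct ibv_flow_spec_action_tag) */

The same hunk continues into the item-sizing function: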
 /**
  * Calculate the required bytes that are needed for the item part of the verbs
- * flow, in addtion returns bit-fields with all the detected action, in order to
- * avoid another interation over the actions.
+ * flow.
  *
- * @param[in] actions
+ * @param[in] items
  *   Pointer to the list of items.
- * @param[in, out] item_flags
- *   Pointer to the detected items.
  *
  * @return
  *   The size of the memory needed for all items.
  */
 static int
-flow_verbs_get_items_and_size(const struct rte_flow_item items[],
-			      uint64_t *item_flags)
+flow_verbs_get_items_size(const struct rte_flow_item items[])
 {
	int size = 0;
-	uint64_t detected_items = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
-		int tunnel = !!(detected_items & MLX5_FLOW_LAYER_TUNNEL);
-
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
-			detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-						   MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
-			detected_items |=
-				tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
-					  MLX5_FLOW_LAYER_INNER_VLAN) :
-					 (MLX5_FLOW_LAYER_OUTER_L2 |
-					  MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
-			detected_items |= tunnel ?
-					  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-					  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
-			detected_items |= tunnel ?
-					  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-					  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
-			detected_items |= tunnel ?
-					  MLX5_FLOW_LAYER_INNER_L4_UDP :
-					  MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
-			detected_items |= tunnel ?
-					  MLX5_FLOW_LAYER_INNER_L4_TCP :
-					  MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
-			detected_items |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
-			detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
-			detected_items |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
-			detected_items |= MLX5_FLOW_LAYER_MPLS;
			break;
 #else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
-			detected_items |= MLX5_FLOW_LAYER_TUNNEL;
			break;
 #endif
		default:
			break;
		}
	}
-	*item_flags = detected_items;
	return size;
 }
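Likewise for items (illustrative): an ETH / IPV4 / UDP pattern now simply yields

	sizeof(struct ibv_flow_spec_eth) +
	sizeof(struct ibv_flow_spec_ipv4_ext) +
	sizeof(struct ibv_flow_spec_tcp_udp)

with no detected_items bookkeeping left in the loop.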
@@ -1369,10 +1325,6 @@ flow_verbs_get_items_and_size(const struct rte_flow_item items[],
  *   Pointer to the list of items.
  * @param[in] actions
  *   Pointer to the list of actions.
- * @param[out] item_flags
- *   Pointer to bit mask of all items detected.
- * @param[out] action_flags
- *   Pointer to bit mask of all actions detected.
  * @param[out] error
  *   Pointer to the error structure.
  *
@@ -1384,15 +1336,13 @@ static struct mlx5_flow *
 flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
-		    uint64_t *item_flags,
-		    uint64_t *action_flags,
		    struct rte_flow_error *error)
 {
	uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
	struct mlx5_flow *flow;

-	size += flow_verbs_get_actions_and_size(actions, action_flags);
-	size += flow_verbs_get_items_and_size(items, item_flags);
+	size += flow_verbs_get_actions_size(actions);
+	size += flow_verbs_get_items_size(items);
	flow = rte_calloc(__func__, 1, size, 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
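Stepping back, the per-device-flow creation path after this patch, sketched with the wrappers from mlx5_flow.c above (error handling elided; the exact flow_drv_translate() argument list is assumed from the flow_list_create() hunk):

	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
	if (!dev_flow)
		goto error;
	dev_flow->flow = flow;
	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
	ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
	if (ret < 0)
		goto error;
	/* dev_flow->layers and flow->actions are set by now. */
	ret = flow_drv_apply(dev, flow, error);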