net/mlx5: add abstraction for multiple flow drivers

Flow engine has to support multiple driver paths. Verbs/DV for NIC flow
steering and Linux TC flower for E-Switch flow steering. In the future,
another flow driver could be added (devX).

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
This commit is contained in:
Yongseok Koh 2018-09-24 19:55:14 +00:00 committed by Ferruh Yigit
parent 51e72d386c
commit 0c76d1c9a1
5 changed files with 335 additions and 77 deletions

View File

@ -1194,7 +1194,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
if (err < 0) if (err < 0)
goto error; goto error;
priv->config.flow_prio = err; priv->config.flow_prio = err;
mlx5_flow_init_driver_ops(eth_dev);
/* /*
* Once the device is added to the list of memory event * Once the device is added to the list of memory event
* callback, its global MR cache table cannot be expanded * callback, its global MR cache table cannot be expanded

View File

@ -38,6 +38,23 @@
extern const struct eth_dev_ops mlx5_dev_ops; extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate; extern const struct eth_dev_ops mlx5_dev_ops_isolate;
/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
/* Tentative declaration of the null driver; the initialized definition
 * appears later in this file.
 */
const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
/* Driver dispatch table indexed by enum mlx5_flow_drv_type. The MIN and
 * MAX slots point at the null driver so a dispatch through an invalid or
 * unsupported type fails with ENOTSUP instead of dereferencing NULL.
 */
const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};
enum mlx5_expansion { enum mlx5_expansion {
MLX5_EXPANSION_ROOT, MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER, MLX5_EXPANSION_ROOT_OUTER,
@ -283,9 +300,6 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
}, },
}; };
/* Holds the nic operations that should be used. */
struct mlx5_flow_driver_ops nic_ops;
/** /**
* Discover the maximum number of priority available. * Discover the maximum number of priority available.
* *
@ -1529,6 +1543,284 @@ mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
" update."); " update.");
} }
/*
 * Validate callback of the null driver.
 *
 * Unconditionally rejects the flow: this driver only exists so that a
 * dispatch through an unsupported driver type fails cleanly instead of
 * calling through a NULL function pointer.
 *
 * @return
 *   Always -ENOTSUP, with rte_errno set accordingly.
 */
static int
flow_null_validate(struct rte_eth_dev *dev __rte_unused,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[] __rte_unused,
		   const struct rte_flow_action actions[] __rte_unused,
		   struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return -ENOTSUP;
}
/*
 * Prepare callback of the null driver: never allocates a device flow.
 *
 * @return
 *   Always NULL, with rte_errno set to ENOTSUP.
 */
static struct mlx5_flow *
flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
		  const struct rte_flow_item items[] __rte_unused,
		  const struct rte_flow_action actions[] __rte_unused,
		  uint64_t *item_flags __rte_unused,
		  uint64_t *action_flags __rte_unused,
		  struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return NULL;
}
/*
 * Translate callback of the null driver.
 *
 * @return
 *   Always -ENOTSUP, with rte_errno set accordingly.
 */
static int
flow_null_translate(struct rte_eth_dev *dev __rte_unused,
		    struct mlx5_flow *dev_flow __rte_unused,
		    const struct rte_flow_attr *attr __rte_unused,
		    const struct rte_flow_item items[] __rte_unused,
		    const struct rte_flow_action actions[] __rte_unused,
		    struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return -ENOTSUP;
}
/*
 * Apply callback of the null driver.
 *
 * @return
 *   Always -ENOTSUP, with rte_errno set accordingly.
 */
static int
flow_null_apply(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return -ENOTSUP;
}
/* Remove callback of the null driver: intentionally a no-op. */
static void
flow_null_remove(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow *flow __rte_unused)
{
}
/* Destroy callback of the null driver: intentionally a no-op. */
static void
flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
		  struct rte_flow *flow __rte_unused)
{
}
/* Void driver to protect from null pointer reference. */
/* Installed in the MIN/MAX slots of flow_drv_ops; every callback fails
 * with ENOTSUP (or is a no-op for remove/destroy).
 */
const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
	.validate = flow_null_validate,
	.prepare = flow_null_prepare,
	.translate = flow_null_translate,
	.apply = flow_null_apply,
	.remove = flow_null_remove,
	.destroy = flow_null_destroy,
};
/**
 * Select flow driver type according to flow attributes and device
 * configuration.
 *
 * @param[in] dev
 *   Pointer to the dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 *
 * @return
 *   flow driver type if supported, MLX5_FLOW_TYPE_MAX otherwise.
 */
static enum mlx5_flow_drv_type
flow_get_drv_type(struct rte_eth_dev *dev __rte_unused,
		  const struct rte_flow_attr *attr)
{
	struct priv *priv __rte_unused = dev->data->dev_private;

	/* No driver handles transfer (E-Switch) attributes here. */
	if (attr->transfer)
		return MLX5_FLOW_TYPE_MAX;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* DV takes precedence when enabled by device configuration. */
	return priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
					 MLX5_FLOW_TYPE_VERBS;
#else
	return MLX5_FLOW_TYPE_VERBS;
#endif
}
#define flow_get_drv_ops(type) flow_drv_ops[type]
/**
* Flow driver validation API. This abstracts calling driver specific functions.
* The type of flow driver is determined according to flow attributes.
*
* @param[in] dev
* Pointer to the dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_ernno is set.
*/
static inline int
flow_drv_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
fops = flow_get_drv_ops(type);
return fops->validate(dev, attr, items, actions, error);
}
/**
 * Flow driver preparation API. This abstracts calling driver specific
 * functions. The parent flow (rte_flow) must already carry a valid driver
 * type (drv_type). The selected driver computes the size of the device
 * flow, allocates it, initializes it and returns the pointer.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
 */
static inline struct mlx5_flow *
flow_drv_prepare(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 uint64_t *item_flags,
		 uint64_t *action_flags,
		 struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *ops;

	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
	ops = flow_get_drv_ops(flow->drv_type);
	return ops->prepare(attr, items, actions, item_flags, action_flags,
			    error);
}
/**
* Flow driver translation API. This abstracts calling driver specific
* functions. Parent flow (rte_flow) should have driver type (drv_type). It
* translates a generic flow into a driver flow. flow_drv_prepare() must
* precede.
*
*
* @param[in] dev
* Pointer to the rte dev structure.
* @param[in, out] dev_flow
* Pointer to the mlx5 flow.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_ernno is set.
*/
static inline int
flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
return fops->translate(dev, dev_flow, attr, items, actions, error);
}
/**
* Flow driver apply API. This abstracts calling driver specific functions.
* Parent flow (rte_flow) should have driver type (drv_type). It applies
* translated driver flows on to device. flow_drv_translate() must precede.
*
* @param[in] dev
* Pointer to Ethernet device structure.
* @param[in, out] flow
* Pointer to flow structure.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static inline int
flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
return fops->apply(dev, flow, error);
}
/**
* Flow driver remove API. This abstracts calling driver specific functions.
* Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
* on device. All the resources of the flow should be freed by calling
* flow_dv_destroy().
*
* @param[in] dev
* Pointer to Ethernet device.
* @param[in, out] flow
* Pointer to flow structure.
*/
static inline void
flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->remove(dev, flow);
}
/**
* Flow driver destroy API. This abstracts calling driver specific functions.
* Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
* on device and releases resources of the flow.
*
* @param[in] dev
* Pointer to Ethernet device.
* @param[in, out] flow
* Pointer to flow structure.
*/
static inline void
flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->destroy(dev, flow);
}
/** /**
* Validate a flow supported by the NIC. * Validate a flow supported by the NIC.
* *
@ -1544,7 +1836,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
{ {
int ret; int ret;
ret = nic_ops.validate(dev, attr, items, actions, error); ret = flow_drv_validate(dev, attr, items, actions, error);
if (ret < 0) if (ret < 0)
return ret; return ret;
return 0; return 0;
@ -1634,7 +1926,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
uint32_t i; uint32_t i;
uint32_t flow_size; uint32_t flow_size;
ret = mlx5_flow_validate(dev, attr, items, actions, error); ret = flow_drv_validate(dev, attr, items, actions, error);
if (ret < 0) if (ret < 0)
return NULL; return NULL;
flow_size = sizeof(struct rte_flow); flow_size = sizeof(struct rte_flow);
@ -1645,6 +1937,9 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
else else
flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
flow = rte_calloc(__func__, 1, flow_size, 0); flow = rte_calloc(__func__, 1, flow_size, 0);
flow->drv_type = flow_get_drv_type(dev, attr);
assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
flow->queue = (void *)(flow + 1); flow->queue = (void *)(flow + 1);
LIST_INIT(&flow->dev_flows); LIST_INIT(&flow->dev_flows);
if (rss && rss->types) { if (rss && rss->types) {
@ -1662,21 +1957,21 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
buf->entry[0].pattern = (void *)(uintptr_t)items; buf->entry[0].pattern = (void *)(uintptr_t)items;
} }
for (i = 0; i < buf->entries; ++i) { for (i = 0; i < buf->entries; ++i) {
dev_flow = nic_ops.prepare(attr, buf->entry[i].pattern, dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
actions, &item_flags, actions, &item_flags, &action_flags,
&action_flags, error); error);
if (!dev_flow) if (!dev_flow)
goto error; goto error;
dev_flow->flow = flow; dev_flow->flow = flow;
LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
ret = nic_ops.translate(dev, dev_flow, attr, ret = flow_drv_translate(dev, dev_flow, attr,
buf->entry[i].pattern, buf->entry[i].pattern,
actions, error); actions, error);
if (ret < 0) if (ret < 0)
goto error; goto error;
} }
if (dev->data->dev_started) { if (dev->data->dev_started) {
ret = nic_ops.apply(dev, flow, error); ret = flow_drv_apply(dev, flow, error);
if (ret < 0) if (ret < 0)
goto error; goto error;
} }
@ -1686,7 +1981,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
error: error:
ret = rte_errno; /* Save rte_errno before cleanup. */ ret = rte_errno; /* Save rte_errno before cleanup. */
assert(flow); assert(flow);
nic_ops.destroy(dev, flow); flow_drv_destroy(dev, flow);
rte_free(flow); rte_free(flow);
rte_errno = ret; /* Restore rte_errno. */ rte_errno = ret; /* Restore rte_errno. */
return NULL; return NULL;
@ -1724,7 +2019,7 @@ static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
struct rte_flow *flow) struct rte_flow *flow)
{ {
nic_ops.destroy(dev, flow); flow_drv_destroy(dev, flow);
TAILQ_REMOVE(list, flow, next); TAILQ_REMOVE(list, flow, next);
/* /*
* Update RX queue flags only if port is started, otherwise it is * Update RX queue flags only if port is started, otherwise it is
@ -1768,7 +2063,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
struct rte_flow *flow; struct rte_flow *flow;
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
nic_ops.remove(dev, flow); flow_drv_remove(dev, flow);
mlx5_flow_rxq_flags_clear(dev); mlx5_flow_rxq_flags_clear(dev);
} }
@ -1791,7 +2086,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
int ret = 0; int ret = 0;
TAILQ_FOREACH(flow, list, next) { TAILQ_FOREACH(flow, list, next) {
ret = nic_ops.apply(dev, flow, &error); ret = flow_drv_apply(dev, flow, &error);
if (ret < 0) if (ret < 0)
goto error; goto error;
mlx5_flow_rxq_flags_set(dev, flow); mlx5_flow_rxq_flags_set(dev, flow);
@ -2482,24 +2777,3 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
} }
return 0; return 0;
} }
/**
* Init the driver ops structure.
*
* @param dev
* Pointer to Ethernet device structure.
*/
void
mlx5_flow_init_driver_ops(struct rte_eth_dev *dev)
{
struct priv *priv __rte_unused = dev->data->dev_private;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
if (priv->config.dv_flow_en)
mlx5_flow_dv_get_driver_ops(&nic_ops);
else
mlx5_flow_verbs_get_driver_ops(&nic_ops);
#else
mlx5_flow_verbs_get_driver_ops(&nic_ops);
#endif
}

View File

@ -120,6 +120,13 @@
/* Max number of actions per DV flow. */ /* Max number of actions per DV flow. */
#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
/* Flow driver back-end types. MIN and MAX delimit the valid range and
 * are mapped to the null driver, which rejects every operation with
 * ENOTSUP.
 */
enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN,
	MLX5_FLOW_TYPE_DV,
	MLX5_FLOW_TYPE_VERBS,
	MLX5_FLOW_TYPE_MAX,
};
/* Matcher PRM representation */ /* Matcher PRM representation */
struct mlx5_flow_dv_match_params { struct mlx5_flow_dv_match_params {
size_t size; size_t size;
@ -205,7 +212,7 @@ struct mlx5_flow_counter {
/* Flow structure. */ /* Flow structure. */
struct rte_flow { struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
struct rte_flow_attr attributes; /**< User flow attribute. */ enum mlx5_flow_drv_type drv_type; /**< Driver type. */
uint32_t layers; uint32_t layers;
/**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */ /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
struct mlx5_flow_counter *counter; /**< Holds flow counter. */ struct mlx5_flow_counter *counter; /**< Holds flow counter. */
@ -309,13 +316,5 @@ int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
uint64_t item_flags, uint64_t item_flags,
struct rte_eth_dev *dev, struct rte_eth_dev *dev,
struct rte_flow_error *error); struct rte_flow_error *error);
void mlx5_flow_init_driver_ops(struct rte_eth_dev *dev);
/* mlx5_flow_dv.c */
void mlx5_flow_dv_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops);
/* mlx5_flow_verbs.c */
void mlx5_flow_verbs_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops);
#endif /* RTE_PMD_MLX5_FLOW_H_ */ #endif /* RTE_PMD_MLX5_FLOW_H_ */

View File

@ -1358,23 +1358,13 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
} }
} }
/** const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
* Fills the flow_ops with the function pointers. .validate = flow_dv_validate,
* .prepare = flow_dv_prepare,
* @param[out] flow_ops .translate = flow_dv_translate,
* Pointer to driver_ops structure. .apply = flow_dv_apply,
*/ .remove = flow_dv_remove,
void .destroy = flow_dv_destroy,
mlx5_flow_dv_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops) };
{
*flow_ops = (struct mlx5_flow_driver_ops) {
.validate = flow_dv_validate,
.prepare = flow_dv_prepare,
.translate = flow_dv_translate,
.apply = flow_dv_apply,
.remove = flow_dv_remove,
.destroy = flow_dv_destroy,
};
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ #endif /* HAVE_IBV_FLOW_DV_SUPPORT */

View File

@ -1643,15 +1643,11 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
return -rte_errno; return -rte_errno;
} }
void const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
mlx5_flow_verbs_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops) .validate = flow_verbs_validate,
{ .prepare = flow_verbs_prepare,
*flow_ops = (struct mlx5_flow_driver_ops) { .translate = flow_verbs_translate,
.validate = flow_verbs_validate, .apply = flow_verbs_apply,
.prepare = flow_verbs_prepare, .remove = flow_verbs_remove,
.translate = flow_verbs_translate, .destroy = flow_verbs_destroy,
.apply = flow_verbs_apply, };
.remove = flow_verbs_remove,
.destroy = flow_verbs_destroy,
};
}