net/mlx4: simplify trigger code for flow rules

Since the flow rules synchronization function mlx4_flow_sync() takes the
state of the device into account (whether it is started), the trigger
functions mlx4_flow_start() and mlx4_flow_stop() are redundant.
Standardize on mlx4_flow_sync().

Use this opportunity to enhance this function with better error reporting,
since the inability to start the device due to a problem with a flow rule
otherwise results in a nondescript error code.

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
This commit is contained in:
Adrien Mazarguil 2017-10-12 14:19:29 +02:00 committed by Ferruh Yigit
parent fc49cbb768
commit fee75e14f3
4 changed files with 46 additions and 73 deletions

View File

@ -60,6 +60,7 @@
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
@ -97,13 +98,17 @@ static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
/* Prepare internal flow rules. */
ret = mlx4_flow_sync(priv);
if (ret)
ERROR("cannot set up internal flow rules: %s",
strerror(-ret));
ret = mlx4_flow_sync(priv, &error);
if (ret) {
ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
" flow error type %d, cause %p, message: %s",
-ret, strerror(-ret), error.type, error.cause,
error.message ? error.message : "(unspecified)");
}
return ret;
}
@ -122,6 +127,7 @@ static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
if (priv->started)
@ -134,10 +140,13 @@ mlx4_dev_start(struct rte_eth_dev *dev)
(void *)dev);
goto err;
}
ret = mlx4_flow_start(priv);
ret = mlx4_flow_sync(priv, &error);
if (ret) {
ERROR("%p: flow start failed: %s",
(void *)dev, strerror(ret));
ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
" flow error type %d, cause %p, message: %s",
(void *)dev,
-ret, strerror(-ret), error.type, error.cause,
error.message ? error.message : "(unspecified)");
goto err;
}
return 0;
@ -164,7 +173,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
return;
DEBUG("%p: detaching flows from all RX queues", (void *)dev);
priv->started = 0;
mlx4_flow_stop(priv);
mlx4_flow_sync(priv, NULL);
mlx4_intr_uninstall(priv);
}

View File

@ -956,14 +956,9 @@ mlx4_flow_isolate(struct rte_eth_dev *dev,
if (!!enable == !!priv->isolated)
return 0;
priv->isolated = !!enable;
if (mlx4_flow_sync(priv)) {
if (mlx4_flow_sync(priv, error)) {
priv->isolated = !enable;
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
enable ?
"cannot enter isolated mode" :
"cannot leave isolated mode");
return -rte_errno;
}
return 0;
}
@ -1075,12 +1070,14 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
*
* @param priv
* Pointer to private structure.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx4_flow_sync(struct priv *priv)
mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
struct rte_flow *flow;
int ret;
@ -1094,20 +1091,27 @@ mlx4_flow_sync(struct priv *priv)
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_FIRST(&priv->flows))
claim_zero(mlx4_flow_destroy(priv->dev, flow, NULL));
claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
} else if (!LIST_FIRST(&priv->flows) ||
!LIST_FIRST(&priv->flows)->internal) {
/*
* If the first rule is not internal outside isolated mode,
* they must be added back.
*/
ret = mlx4_flow_internal(priv, NULL);
ret = mlx4_flow_internal(priv, error);
if (ret)
return ret;
}
if (priv->started)
return mlx4_flow_start(priv);
mlx4_flow_stop(priv);
/* Toggle the remaining flow rules. */
for (flow = LIST_FIRST(&priv->flows);
flow;
flow = LIST_NEXT(flow, next)) {
ret = mlx4_flow_toggle(priv, flow, priv->started, error);
if (ret)
return ret;
}
if (!priv->started)
assert(!priv->drop);
return 0;
}
@ -1129,52 +1133,6 @@ mlx4_flow_clean(struct priv *priv)
mlx4_flow_destroy(priv->dev, flow, NULL);
}
/**
* Disable flow rules.
*
* @param priv
* Pointer to private structure.
*/
void
mlx4_flow_stop(struct priv *priv)
{
struct rte_flow *flow;
for (flow = LIST_FIRST(&priv->flows);
flow;
flow = LIST_NEXT(flow, next)) {
claim_zero(mlx4_flow_toggle(priv, flow, 0, NULL));
}
assert(!priv->drop);
}
/**
* Enable flow rules.
*
* @param priv
* Pointer to private structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx4_flow_start(struct priv *priv)
{
int ret;
struct rte_flow *flow;
for (flow = LIST_FIRST(&priv->flows);
flow;
flow = LIST_NEXT(flow, next)) {
ret = mlx4_flow_toggle(priv, flow, 1, NULL);
if (unlikely(ret)) {
mlx4_flow_stop(priv);
return ret;
}
}
return 0;
}
static const struct rte_flow_ops mlx4_flow_ops = {
.validate = mlx4_flow_validate,
.create = mlx4_flow_create,

View File

@ -72,10 +72,8 @@ struct rte_flow {
/* mlx4_flow.c */
int mlx4_flow_sync(struct priv *priv);
int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct priv *priv);
int mlx4_flow_start(struct priv *priv);
void mlx4_flow_stop(struct priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,

View File

@ -54,6 +54,7 @@
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
@ -401,7 +402,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
dev->data->rx_queues[idx] = NULL;
/* Disable associated flows. */
mlx4_flow_sync(priv);
mlx4_flow_sync(priv, NULL);
mlx4_rxq_cleanup(rxq);
} else {
rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
@ -416,13 +417,20 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (ret) {
rte_free(rxq);
} else {
struct rte_flow_error error;
rxq->stats.idx = idx;
DEBUG("%p: adding Rx queue %p to list",
(void *)dev, (void *)rxq);
dev->data->rx_queues[idx] = rxq;
/* Re-enable associated flows. */
ret = mlx4_flow_sync(priv);
ret = mlx4_flow_sync(priv, &error);
if (ret) {
ERROR("cannot re-attach flow rules to queue %u"
" (code %d, \"%s\"), flow error type %d,"
" cause %p, message: %s", idx,
-ret, strerror(-ret), error.type, error.cause,
error.message ? error.message : "(unspecified)");
dev->data->rx_queues[idx] = NULL;
mlx4_rxq_cleanup(rxq);
rte_free(rxq);
@ -457,7 +465,7 @@ mlx4_rx_queue_release(void *dpdk_rxq)
priv->dev->data->rx_queues[i] = NULL;
break;
}
mlx4_flow_sync(priv);
mlx4_flow_sync(priv, NULL);
mlx4_rxq_cleanup(rxq);
rte_free(rxq);
}