net/mlx5: flow counter support

Example of setting a rule in testpmd to count packets with
destination IP 192.168.3.1:

testpmd: flow create 0 ingress pattern eth / ipv4 dst is 192.168.3.1
/ end actions queue index 0 / count / end

Reading the number of packets and bytes for the rule:

testpmd: flow query 0 0 count

Note: this feature is only supported starting from Mellanox OFED 4.2.
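
The testpmd commands above map onto the rte_flow API roughly as in the
sketch below. This is an illustration only, not part of the patch: the
helper names are invented, error handling is minimal, and it assumes the
17.11-era rte_flow_query() prototype whose third argument is an action
type (later DPDK releases pass a struct rte_flow_action pointer instead).

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Create "eth / ipv4 dst is 192.168.3.1 / end actions queue index 0 / count / end". */
static struct rte_flow *
create_counted_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xc0a80301), /* 192.168.3.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xffffffff), /* match the full address */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT }, /* attach a counter set */
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

/* Equivalent of "flow query 0 0 count": read hits/bytes for the rule. */
static int
read_flow_counters(uint16_t port_id, struct rte_flow *flow,
		   struct rte_flow_query_count *stats,
		   struct rte_flow_error *error)
{
	stats->reset = 0; /* set to 1 to clear the counters after reading */
	return rte_flow_query(port_id, flow, RTE_FLOW_ACTION_TYPE_COUNT,
			      stats, error);
}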

Signed-off-by: Ori Kam <orika@mellanox.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Author: Ori Kam
Date: 2017-10-10 16:22:54 +02:00
Committed by: Ferruh Yigit
Commit: 9a761de8ea (parent: 78c7406b7b)
4 changed files with 214 additions and 0 deletions

drivers/net/mlx5/Makefile

@@ -144,6 +144,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
/usr/include/linux/ethtool.h \
enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
infiniband/verbs.h \
enum IBV_FLOW_SPEC_ACTION_COUNT \
$(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.

drivers/net/mlx5/mlx5.c

@@ -548,6 +548,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int idx;
int i;
struct mlx5dv_context attrs_out;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
struct ibv_counter_set_description cs_desc;
#endif
(void)pci_drv;
assert(pci_drv == &mlx5_driver);
@@ -667,6 +670,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_device_attr_ex device_attr_ex;
struct ether_addr mac;
uint16_t num_vfs = 0;
struct ibv_device_attr_ex device_attr;
struct mlx5_args args = {
.cqe_comp = MLX5_ARG_UNSET,
.txq_inline = MLX5_ARG_UNSET,
@@ -721,6 +725,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
goto port_error;
}
ibv_query_device_ex(ctx, NULL, &device_attr);
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
@@ -798,6 +803,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("L2 tunnel checksum offloads are %ssupported",
(priv->hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
priv->counter_set_supported = !!(device_attr.max_counter_sets);
ibv_describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
cs_desc.counter_type, cs_desc.num_of_cs,
cs_desc.attributes);
#endif
priv->ind_table_max_size =
device_attr_ex.rss_caps.max_rwq_indirection_table_size;
/* Remove this check once DPDK supports larger/variable

drivers/net/mlx5/mlx5.h

@@ -117,6 +117,7 @@ struct priv {
unsigned int isolated:1; /* Whether isolated mode is enabled. */
unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
unsigned int counter_set_supported:1; /* Counter set is supported. */
/* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
@@ -276,6 +277,9 @@ int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
struct rte_flow_error *);
void priv_flow_flush(struct priv *, struct mlx5_flows *);
int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *,
enum rte_flow_action_type, void *,
struct rte_flow_error *);
int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
int priv_flow_start(struct priv *, struct mlx5_flows *);
void priv_flow_stop(struct priv *, struct mlx5_flows *);

drivers/net/mlx5/mlx5_flow.c

@@ -59,6 +59,25 @@
#define MLX5_IPV4 4
#define MLX5_IPV6 6
#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
struct ibv_counter_set_init_attr {
int dummy;
};
struct ibv_flow_spec_counter_action {
int dummy;
};
struct ibv_counter_set {
int dummy;
};
static inline int
ibv_destroy_counter_set(struct ibv_counter_set *cs)
{
(void)cs;
return -ENOTSUP;
}
#endif
/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
@@ -107,6 +126,9 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
static int
mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser);
/* Hash RX queue types. */
enum hash_rxq_type {
HASH_RXQ_TCPV4,
@@ -190,6 +212,12 @@ const struct hash_rxq_init hash_rxq_init[] = {
/* Number of entries in hash_rxq_init[]. */
const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
/** Structure for holding counter stats. */
struct mlx5_flow_counter_stats {
uint64_t hits; /**< Number of packets matched by the rule. */
uint64_t bytes; /**< Number of bytes matched by the rule. */
};
/** Structure for Drop queue. */
struct mlx5_hrxq_drop {
struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
@@ -220,6 +248,8 @@ struct rte_flow {
uint16_t (*queues)[]; /**< Queues indexes to use. */
struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
struct mlx5_flow_counter_stats counter_stats; /**< The counter stats. */
union {
struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
/**< Flow with Rx queue. */
@@ -275,6 +305,9 @@ static const enum rte_flow_action_type valid_actions[] = {
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_MARK,
RTE_FLOW_ACTION_TYPE_FLAG,
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
RTE_FLOW_ACTION_TYPE_COUNT,
#endif
RTE_FLOW_ACTION_TYPE_END,
};
@@ -403,12 +436,14 @@ struct mlx5_flow_parse {
/**< Whether resources should remain after a validate. */
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t mark:1; /**< Mark is present in the flow. */
uint32_t count:1; /**< Count is present in the flow. */
uint32_t mark_id; /**< Mark identifier. */
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
uint16_t queues_n; /**< Number of entries in queue[]. */
struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
enum hash_rxq_type layer; /**< Last pattern layer detected. */
struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
union {
struct {
struct ibv_flow_attr *ibv_attr;
@@ -430,7 +465,11 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.create = mlx5_flow_create,
.destroy = mlx5_flow_destroy,
.flush = mlx5_flow_flush,
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
.query = mlx5_flow_query,
#else
.query = NULL,
#endif
.isolate = mlx5_flow_isolate,
};
@@ -740,6 +779,9 @@ priv_flow_convert_actions(struct priv *priv,
parser->mark_id = mark->id;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
priv->counter_set_supported) {
parser->count = 1;
} else {
goto exit_action_not_supported;
}
@@ -837,6 +879,16 @@ priv_flow_convert_items_validate(struct priv *priv,
parser->queue[i].offset +=
sizeof(struct ibv_flow_spec_action_tag);
}
if (parser->count) {
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
if (parser->drop) {
parser->drop_q.offset += size;
} else {
for (i = 0; i != hash_rxq_init_n; ++i)
parser->queue[i].offset += size;
}
}
return 0;
exit_item_not_supported:
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1111,6 +1163,11 @@ priv_flow_convert(struct priv *priv,
}
if (parser->mark)
mlx5_flow_create_flag_mark(parser, parser->mark_id);
if (parser->count && parser->create) {
mlx5_flow_create_count(priv, parser);
if (!parser->cs)
goto exit_count_error;
}
/*
* Last step. Complete missing specification to reach the RSS
* configuration.
@@ -1142,6 +1199,10 @@ priv_flow_convert(struct priv *priv,
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot allocate verbs spec attributes.");
return ret;
exit_count_error:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create counter.");
return rte_errno;
}
/**
@@ -1538,6 +1599,40 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
return 0;
}
/**
* Convert count action to Verbs specification.
*
* @param priv
* Pointer to private structure.
* @param parser
* Pointer to MLX5 flow parser structure.
*
* @return
* 0 on success, an errno value on failure.
*/
static int
mlx5_flow_create_count(struct priv *priv __rte_unused,
struct mlx5_flow_parse *parser __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
struct ibv_counter_set_init_attr init_attr = {0};
struct ibv_flow_spec_counter_action counter = {
.type = IBV_FLOW_SPEC_ACTION_COUNT,
.size = size,
.counter_set_handle = 0,
};
init_attr.counter_set_id = 0;
parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
if (!parser->cs)
return EINVAL;
counter.counter_set_handle = parser->cs->handle;
mlx5_flow_create_copy(parser, &counter, size);
#endif
return 0;
}
/**
* Complete flow rule creation with a drop queue.
*
@@ -1580,6 +1675,8 @@ priv_flow_create_action_queue_drop(struct priv *priv,
parser->drop_q.ibv_attr = NULL;
flow->drxq.ibv_flow = ibv_create_flow(priv->flow_drop_queue->qp,
flow->drxq.ibv_attr);
if (parser->count)
flow->cs = parser->cs;
if (!flow->drxq.ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
@@ -1597,6 +1694,11 @@ priv_flow_create_action_queue_drop(struct priv *priv,
rte_free(flow->drxq.ibv_attr);
flow->drxq.ibv_attr = NULL;
}
if (flow->cs) {
claim_zero(ibv_destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
return err;
}
@@ -1687,6 +1789,8 @@ priv_flow_create_action_queue(struct priv *priv,
err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
if (err)
goto error;
if (parser->count)
flow->cs = parser->cs;
if (!priv->dev->data->dev_started)
return 0;
for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -1727,6 +1831,11 @@ priv_flow_create_action_queue(struct priv *priv,
if (flow->frxq[i].ibv_attr)
rte_free(flow->frxq[i].ibv_attr);
}
if (flow->cs) {
claim_zero(ibv_destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
return err;
}
@@ -1870,6 +1979,10 @@ priv_flow_destroy(struct priv *priv,
{
unsigned int i;
if (flow->cs) {
claim_zero(ibv_destroy_counter_set(flow->cs));
flow->cs = NULL;
}
if (flow->drop || !flow->mark)
goto free;
for (i = 0; i != flow->queues_n; ++i) {
@@ -2340,6 +2453,86 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
return 0;
}
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
/**
* Query flow counter.
*
* @param cs
* The counter set to query.
* @param counter_stats
* Accumulated counter statistics, used as the baseline when resetting.
* @param query_count
* The rte_flow_query_count structure filled with the returned data.
* @param error
* Perform verbose error reporting if not NULL.
*
* @return
* 0 on success, an errno value otherwise and rte_errno is set.
*/
static int
priv_flow_query_count(struct ibv_counter_set *cs,
struct mlx5_flow_counter_stats *counter_stats,
struct rte_flow_query_count *query_count,
struct rte_flow_error *error)
{
uint64_t counters[2];
struct ibv_query_counter_set_attr query_cs_attr = {
.cs = cs,
.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
};
struct ibv_counter_set_data query_out = {
.out = counters,
.outlen = 2 * sizeof(uint64_t),
};
int res = ibv_query_counter_set(&query_cs_attr, &query_out);
if (res) {
rte_flow_error_set(error, -res,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"cannot read counter");
return -res;
}
query_count->hits_set = 1;
query_count->bytes_set = 1;
query_count->hits = counters[0] - counter_stats->hits;
query_count->bytes = counters[1] - counter_stats->bytes;
if (query_count->reset) {
counter_stats->hits = counters[0];
counter_stats->bytes = counters[1];
}
return 0;
}
/**
* Query a flow.
*
* @see rte_flow_query()
* @see rte_flow_ops
*/
int
mlx5_flow_query(struct rte_eth_dev *dev,
struct rte_flow *flow,
enum rte_flow_action_type action __rte_unused,
void *data,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
int res = EINVAL;
priv_lock(priv);
if (flow->cs) {
res = priv_flow_query_count(flow->cs,
&flow->counter_stats,
(struct rte_flow_query_count *)data,
error);
} else {
rte_flow_error_set(error, res,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"no counter found for flow");
}
priv_unlock(priv);
return -res;
}
#endif
/**
* Isolated mode.
*