2018-01-29 13:11:30 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
* Copyright 2016 6WIND S.A.
|
2018-03-20 19:20:35 +00:00
|
|
|
* Copyright 2016 Mellanox Technologies, Ltd
|
2016-12-29 15:15:17 +00:00
|
|
|
*/
|
|
|
|
|
2016-12-29 15:15:18 +00:00
|
|
|
#include <sys/queue.h>
|
2018-04-25 15:27:46 +00:00
|
|
|
#include <stdint.h>
|
2016-12-29 15:15:18 +00:00
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
/* Verbs header. */
|
|
|
|
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
|
|
|
|
#ifdef PEDANTIC
|
|
|
|
#pragma GCC diagnostic ignored "-Wpedantic"
|
|
|
|
#endif
|
|
|
|
#include <infiniband/verbs.h>
|
|
|
|
#ifdef PEDANTIC
|
|
|
|
#pragma GCC diagnostic error "-Wpedantic"
|
|
|
|
#endif
|
|
|
|
|
2018-04-19 10:07:29 +00:00
|
|
|
#include <rte_common.h>
|
ethdev: fix TPID handling in flow API
TPID handling in rte_flow VLAN and E_TAG pattern item definitions is not
consistent with the normal stacking order of pattern items, which is
confusing to applications.
Problem is that when followed by one of these layers, the EtherType field
of the preceding layer keeps its "inner" definition, and the "outer" TPID
is provided by the subsequent layer, the reverse of how a packet looks like
on the wire:
Wire: [ ETH TPID = A | VLAN EtherType = B | B DATA ]
rte_flow: [ ETH EtherType = B | VLAN TPID = A | B DATA ]
Worse, when QinQ is involved, the stacking order of VLAN layers is
unspecified. It is unclear whether it should be reversed (innermost to
outermost) as well given TPID applies to the previous layer:
Wire: [ ETH TPID = A | VLAN TPID = B | VLAN EtherType = C | C DATA ]
rte_flow 1: [ ETH EtherType = C | VLAN TPID = B | VLAN TPID = A | C DATA ]
rte_flow 2: [ ETH EtherType = C | VLAN TPID = A | VLAN TPID = B | C DATA ]
While specifying EtherType/TPID is hopefully rarely necessary, the stacking
order in case of QinQ and the lack of documentation remain an issue.
This patch replaces TPID in the VLAN pattern item with an inner
EtherType/TPID as is usually done everywhere else (e.g. struct vlan_hdr),
clarifies documentation and updates all relevant code.
It breaks ABI compatibility for the following public functions:
- rte_flow_copy()
- rte_flow_create()
- rte_flow_query()
- rte_flow_validate()
Summary of changes for PMDs that implement ETH, VLAN or E_TAG pattern
items:
- bnxt: EtherType matching is supported with and without VLAN, but TPID
matching is not and triggers an error.
- e1000: EtherType matching is only supported with the ETHERTYPE filter,
which does not support VLAN matching, therefore no impact.
- enic: same as bnxt.
- i40e: same as bnxt with existing FDIR limitations on allowed EtherType
values. The remaining filter types (VXLAN, NVGRE, QINQ) do not support
EtherType matching.
- ixgbe: same as e1000, with additional minor change to rely on the new
E-Tag macro definition.
- mlx4: EtherType/TPID matching is not supported, no impact.
- mlx5: same as bnxt.
- mvpp2: same as bnxt.
- sfc: same as bnxt.
- tap: same as bnxt.
Fixes: b1a4b4cbc0a8 ("ethdev: introduce generic flow API")
Fixes: 99e7003831c3 ("net/ixgbe: parse L2 tunnel filter")
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:56 +00:00
|
|
|
#include <rte_ether.h>
|
2018-04-25 15:27:52 +00:00
|
|
|
#include <rte_eth_ctrl.h>
|
2018-01-22 00:16:22 +00:00
|
|
|
#include <rte_ethdev_driver.h>
|
2016-12-29 15:15:17 +00:00
|
|
|
#include <rte_flow.h>
|
|
|
|
#include <rte_flow_driver.h>
|
2016-12-29 15:15:18 +00:00
|
|
|
#include <rte_malloc.h>
|
2018-01-16 09:17:52 +00:00
|
|
|
#include <rte_ip.h>
|
2016-12-29 15:15:17 +00:00
|
|
|
|
|
|
|
#include "mlx5.h"
|
2018-01-03 09:14:19 +00:00
|
|
|
#include "mlx5_defs.h"
|
2016-12-29 15:15:21 +00:00
|
|
|
#include "mlx5_prm.h"
|
2018-01-30 15:34:56 +00:00
|
|
|
#include "mlx5_glue.h"
|
2016-12-29 15:15:17 +00:00
|
|
|
|
2018-07-12 09:30:47 +00:00
|
|
|
/* Dev ops structure defined in mlx5.c */
|
|
|
|
extern const struct eth_dev_ops mlx5_dev_ops;
|
|
|
|
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
|
2018-04-23 12:33:07 +00:00
|
|
|
|
2018-07-12 09:30:50 +00:00
|
|
|
/* Pattern Layer bits. */
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
|
|
|
|
/* Masks. */
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_L3 \
|
|
|
|
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
|
|
|
|
#define MLX5_FLOW_LAYER_OUTER_L4 \
|
|
|
|
(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
|
|
|
|
|
|
|
|
/* Actions that modify the fate of matching traffic. */
|
|
|
|
#define MLX5_FLOW_FATE_DROP (1u << 0)
|
|
|
|
|
|
|
|
/** Handles information leading to a drop fate. */
struct mlx5_flow_verbs {
	unsigned int size; /**< Size of the attribute. */
	struct {
		struct ibv_flow_attr *attr;
		/**< Pointer to the Specification buffer. */
		uint8_t *specs; /**< Pointer to the specifications. */
	};
	/* Verbs flow handle returned by ibv_create_flow(); NULL until the
	 * flow is applied to the device.
	 */
	struct ibv_flow *flow; /**< Verbs flow pointer. */
	/* Hash Rx queue the flow is attached to (drop queue in this engine);
	 * NULL when the flow is not applied.
	 */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
};
|
|
|
|
|
|
|
|
/* Flow structure. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct rte_flow_attr attributes; /**< User flow attribute. */
	uint32_t layers;
	/**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
	uint32_t fate;
	/**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
	struct mlx5_flow_verbs verbs; /* Verbs drop flow. */
};
|
2018-04-23 12:33:07 +00:00
|
|
|
|
2018-07-12 09:30:47 +00:00
|
|
|
/* Table of rte_flow callbacks implemented by this PMD. */
static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
};
|
2016-12-29 15:15:18 +00:00
|
|
|
|
2018-07-12 09:30:47 +00:00
|
|
|
/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr; /* Flow rule attributes. */
	struct rte_flow_action actions[2]; /* Fate action + END terminator. */
	struct rte_flow_item items[4]; /* L2/L3/L4 pattern + END terminator. */
	struct rte_flow_item_eth l2; /* Ethernet spec. */
	struct rte_flow_item_eth l2_mask; /* Ethernet mask. */
	/* L3 spec: exactly one of IPv4/IPv6 is used per request. */
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	/* L4 spec: exactly one of UDP/TCP is used per request. */
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action_queue queue; /* Target queue action conf. */
};
|
2017-10-09 14:45:04 +00:00
|
|
|
|
2018-07-12 09:30:47 +00:00
|
|
|
/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type; /* Kind of the specification. */
	uint16_t size; /* Size in bytes of the specification. */
};
|
2017-10-09 14:44:58 +00:00
|
|
|
|
2018-07-12 09:30:48 +00:00
|
|
|
/**
|
2018-07-12 09:30:49 +00:00
|
|
|
* Discover the maximum number of priority available.
|
2018-07-12 09:30:48 +00:00
|
|
|
*
|
|
|
|
* @param[in] dev
|
|
|
|
* Pointer to Ethernet device.
|
|
|
|
*
|
|
|
|
* @return
|
2018-07-12 09:30:49 +00:00
|
|
|
* number of supported flow priority on success, a negative errno value
|
|
|
|
* otherwise and rte_errno is set.
|
2018-07-12 09:30:48 +00:00
|
|
|
*/
|
|
|
|
int
|
2018-07-12 09:30:49 +00:00
|
|
|
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
|
2018-07-12 09:30:48 +00:00
|
|
|
{
|
|
|
|
struct {
|
|
|
|
struct ibv_flow_attr attr;
|
|
|
|
struct ibv_flow_spec_eth eth;
|
|
|
|
struct ibv_flow_spec_action_drop drop;
|
|
|
|
} flow_attr = {
|
|
|
|
.attr = {
|
|
|
|
.num_of_specs = 2,
|
|
|
|
},
|
|
|
|
.eth = {
|
|
|
|
.type = IBV_FLOW_SPEC_ETH,
|
|
|
|
.size = sizeof(struct ibv_flow_spec_eth),
|
|
|
|
},
|
|
|
|
.drop = {
|
|
|
|
.size = sizeof(struct ibv_flow_spec_action_drop),
|
|
|
|
.type = IBV_FLOW_SPEC_ACTION_DROP,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
struct ibv_flow *flow;
|
|
|
|
struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
|
2018-07-12 09:30:49 +00:00
|
|
|
uint16_t vprio[] = { 8, 16 };
|
|
|
|
int i;
|
2018-07-12 09:30:48 +00:00
|
|
|
|
|
|
|
if (!drop) {
|
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return -rte_errno;
|
|
|
|
}
|
2018-07-12 09:30:49 +00:00
|
|
|
for (i = 0; i != RTE_DIM(vprio); i++) {
|
|
|
|
flow_attr.attr.priority = vprio[i] - 1;
|
|
|
|
flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
|
2018-07-12 09:30:48 +00:00
|
|
|
if (!flow)
|
|
|
|
break;
|
|
|
|
claim_zero(mlx5_glue->destroy_flow(flow));
|
|
|
|
}
|
|
|
|
mlx5_hrxq_drop_release(dev);
|
|
|
|
DRV_LOG(INFO, "port %u flow maximum priority: %d",
|
2018-07-12 09:30:49 +00:00
|
|
|
dev->data->port_id, vprio[i - 1]);
|
|
|
|
return vprio[i - 1];
|
2018-07-12 09:30:48 +00:00
|
|
|
}
|
|
|
|
|
2017-10-09 14:44:58 +00:00
|
|
|
/**
|
2018-07-12 09:30:50 +00:00
|
|
|
* Verify the @p attributes will be correctly understood by the NIC and store
|
|
|
|
* them in the @p flow if everything is correct.
|
2017-10-09 14:44:58 +00:00
|
|
|
*
|
2018-07-12 09:30:50 +00:00
|
|
|
* @param[in] dev
|
2018-03-05 12:21:04 +00:00
|
|
|
* Pointer to Ethernet device.
|
2018-07-12 09:30:50 +00:00
|
|
|
* @param[in] attributes
|
|
|
|
* Pointer to flow attributes
|
|
|
|
* @param[in, out] flow
|
|
|
|
* Pointer to the rte_flow structure.
|
|
|
|
* @param[out] error
|
|
|
|
* Pointer to error structure.
|
|
|
|
*
|
|
|
|
* @return
|
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
mlx5_flow_attributes(struct rte_eth_dev *dev,
|
|
|
|
const struct rte_flow_attr *attributes,
|
|
|
|
struct rte_flow *flow,
|
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
|
|
|
uint32_t priority_max =
|
|
|
|
((struct priv *)dev->data->dev_private)->config.flow_prio;
|
|
|
|
|
|
|
|
if (attributes->group)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
|
|
|
|
NULL,
|
|
|
|
"groups is not supported");
|
|
|
|
if (attributes->priority >= priority_max)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
|
|
|
|
NULL,
|
|
|
|
"priority out of range");
|
|
|
|
if (attributes->egress)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
|
|
|
|
NULL,
|
|
|
|
"egress is not supported");
|
|
|
|
if (attributes->transfer)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
|
|
|
|
NULL,
|
|
|
|
"transfer is not supported");
|
|
|
|
if (!attributes->ingress)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
|
|
|
|
NULL,
|
|
|
|
"ingress attribute is mandatory");
|
|
|
|
flow->attributes = *attributes;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Verify the @p item specifications (spec, last, mask) are compatible with the
|
|
|
|
* NIC capabilities.
|
|
|
|
*
|
|
|
|
* @param[in] item
|
|
|
|
* Item specification.
|
|
|
|
* @param[in] mask
|
|
|
|
* @p item->mask or flow default bit-masks.
|
|
|
|
* @param[in] nic_mask
|
|
|
|
* Bit-masks covering supported fields by the NIC to compare with user mask.
|
|
|
|
* @param[in] size
|
|
|
|
* Bit-masks size in bytes.
|
|
|
|
* @param[out] error
|
|
|
|
* Pointer to error structure.
|
|
|
|
*
|
|
|
|
* @return
|
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
|
|
|
|
const uint8_t *mask,
|
|
|
|
const uint8_t *nic_mask,
|
|
|
|
unsigned int size,
|
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
assert(nic_mask);
|
|
|
|
for (i = 0; i < size; ++i)
|
|
|
|
if ((nic_mask[i] | mask[i]) != nic_mask[i])
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ITEM,
|
|
|
|
item,
|
|
|
|
"mask enables non supported"
|
|
|
|
" bits");
|
|
|
|
if (!item->spec && (item->mask || item->last))
|
|
|
|
return rte_flow_error_set(error, EINVAL,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ITEM,
|
|
|
|
item,
|
|
|
|
"mask/last without a spec is not"
|
|
|
|
" supported");
|
|
|
|
if (item->spec && item->last) {
|
|
|
|
uint8_t spec[size];
|
|
|
|
uint8_t last[size];
|
|
|
|
unsigned int i;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
for (i = 0; i < size; ++i) {
|
|
|
|
spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
|
|
|
|
last[i] = ((const uint8_t *)item->last)[i] & mask[i];
|
|
|
|
}
|
|
|
|
ret = memcmp(spec, last, size);
|
|
|
|
if (ret != 0)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ITEM,
|
|
|
|
item,
|
|
|
|
"range is not supported");
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Add a verbs specification into @p flow.
|
|
|
|
*
|
|
|
|
* @param[in, out] flow
|
|
|
|
* Pointer to flow structure.
|
|
|
|
* @param[in] src
|
|
|
|
* Create specification.
|
|
|
|
* @param[in] size
|
|
|
|
* Size in bytes of the specification to copy.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
|
|
|
|
{
|
|
|
|
if (flow->verbs.specs) {
|
|
|
|
void *dst;
|
|
|
|
|
|
|
|
dst = (void *)(flow->verbs.specs + flow->verbs.size);
|
|
|
|
memcpy(dst, src, size);
|
|
|
|
++flow->verbs.attr->num_of_specs;
|
|
|
|
}
|
|
|
|
flow->verbs.size += size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is lesser or equal to @p flow_size, the @p item has fully been converted,
 *   otherwise another call with this returned memory size should be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
		   const size_t flow_size, struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	/* Fields the NIC can actually match on for this item. */
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH,
		.size = size,
	};
	int ret;

	/* Only a single outer L2 layer is supported per flow. */
	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L2 layers already configured");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	if (ret)
		return ret;
	flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
	/* Not enough room: report the required size without writing. */
	if (size > flow_size)
		return size;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	mlx5_flow_spec_verbs_add(flow, &eth, size);
	return size;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Convert the @p pattern into a Verbs specifications after ensuring the NIC
|
|
|
|
* will understand and process it correctly.
|
|
|
|
* The conversion is performed item per item, each of them is written into
|
|
|
|
* the @p flow if its size is lesser or equal to @p flow_size.
|
|
|
|
* Validation and memory consumption computation are still performed until the
|
|
|
|
* end of @p pattern, unless an error is encountered.
|
|
|
|
*
|
|
|
|
* @param[in] pattern
|
|
|
|
* Flow pattern.
|
|
|
|
* @param[in, out] flow
|
|
|
|
* Pointer to the rte_flow structure.
|
|
|
|
* @param[in] flow_size
|
|
|
|
* Size in bytes of the available space in @p flow, if too small some
|
|
|
|
* garbage may be present.
|
|
|
|
* @param[out] error
|
|
|
|
* Pointer to error structure.
|
|
|
|
*
|
|
|
|
* @return
|
|
|
|
* On success the number of bytes consumed/necessary, if the returned value
|
|
|
|
* is lesser or equal to @p flow_size, the @pattern has fully been
|
|
|
|
* converted, otherwise another call with this returned memory size should
|
|
|
|
* be done.
|
|
|
|
* On error, a negative errno value is returned and rte_errno is set.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
mlx5_flow_items(const struct rte_flow_item pattern[],
|
|
|
|
struct rte_flow *flow, const size_t flow_size,
|
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
|
|
|
int remain = flow_size;
|
|
|
|
size_t size = 0;
|
|
|
|
|
|
|
|
for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
switch (pattern->type) {
|
|
|
|
case RTE_FLOW_ITEM_TYPE_VOID:
|
|
|
|
break;
|
|
|
|
case RTE_FLOW_ITEM_TYPE_ETH:
|
|
|
|
ret = mlx5_flow_item_eth(pattern, flow, remain, error);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ITEM,
|
|
|
|
pattern,
|
|
|
|
"item not supported");
|
|
|
|
}
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
if (remain > ret)
|
|
|
|
remain -= ret;
|
|
|
|
else
|
|
|
|
remain = 0;
|
|
|
|
size += ret;
|
|
|
|
}
|
|
|
|
if (!flow->layers) {
|
|
|
|
const struct rte_flow_item item = {
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_ETH,
|
|
|
|
};
|
|
|
|
|
|
|
|
return mlx5_flow_item_eth(&item, flow, flow_size, error);
|
|
|
|
}
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Convert the @p action into a Verbs specification after ensuring the NIC
|
|
|
|
* will understand and process it correctly.
|
|
|
|
* If the necessary size for the conversion is greater than the @p flow_size,
|
|
|
|
* nothing is written in @p flow, the validation is still performed.
|
|
|
|
*
|
|
|
|
* @param[in] action
|
|
|
|
* Action configuration.
|
|
|
|
* @param[in, out] flow
|
|
|
|
* Pointer to flow structure.
|
|
|
|
* @param[in] flow_size
|
|
|
|
* Size in bytes of the available space in @p flow, if too small, nothing is
|
|
|
|
* written.
|
|
|
|
* @param[out] error
|
|
|
|
* Pointer to error structure.
|
|
|
|
*
|
|
|
|
* @return
|
|
|
|
* On success the number of bytes consumed/necessary, if the returned value
|
|
|
|
* is lesser or equal to @p flow_size, the @p action has fully been
|
|
|
|
* converted, otherwise another call with this returned memory size should
|
|
|
|
* be done.
|
|
|
|
* On error, a negative errno value is returned and rte_errno is set.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
mlx5_flow_action_drop(const struct rte_flow_action *action,
|
|
|
|
struct rte_flow *flow, const size_t flow_size,
|
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
|
|
|
unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
|
|
|
|
struct ibv_flow_spec_action_drop drop = {
|
|
|
|
.type = IBV_FLOW_SPEC_ACTION_DROP,
|
|
|
|
.size = size,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (flow->fate)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ACTION,
|
|
|
|
action,
|
|
|
|
"multiple fate actions are not"
|
|
|
|
" supported");
|
|
|
|
if (size < flow_size)
|
|
|
|
mlx5_flow_spec_verbs_add(flow, &drop, size);
|
|
|
|
flow->fate |= MLX5_FLOW_FATE_DROP;
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Convert the @p action into @p flow after ensuring the NIC will understand
|
|
|
|
* and process it correctly.
|
|
|
|
* The conversion is performed action per action, each of them is written into
|
|
|
|
* the @p flow if its size is lesser or equal to @p flow_size.
|
|
|
|
* Validation and memory consumption computation are still performed until the
|
|
|
|
* end of @p action, unless an error is encountered.
|
|
|
|
*
|
|
|
|
* @param[in] dev
|
|
|
|
* Pointer to Ethernet device structure.
|
|
|
|
* @param[in] actions
|
|
|
|
* Pointer to flow actions array.
|
|
|
|
* @param[in, out] flow
|
|
|
|
* Pointer to the rte_flow structure.
|
|
|
|
* @param[in] flow_size
|
|
|
|
* Size in bytes of the available space in @p flow, if too small some
|
|
|
|
* garbage may be present.
|
|
|
|
* @param[out] error
|
|
|
|
* Pointer to error structure.
|
|
|
|
*
|
|
|
|
* @return
|
|
|
|
* On success the number of bytes consumed/necessary, if the returned value
|
|
|
|
* is lesser or equal to @p flow_size, the @p actions has fully been
|
|
|
|
* converted, otherwise another call with this returned memory size should
|
|
|
|
* be done.
|
|
|
|
* On error, a negative errno value is returned and rte_errno is set.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
mlx5_flow_actions(struct rte_eth_dev *dev __rte_unused,
|
|
|
|
const struct rte_flow_action actions[],
|
|
|
|
struct rte_flow *flow, const size_t flow_size,
|
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
|
|
|
size_t size = 0;
|
|
|
|
int remain = flow_size;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
|
|
|
|
switch (actions->type) {
|
|
|
|
case RTE_FLOW_ACTION_TYPE_VOID:
|
|
|
|
break;
|
|
|
|
case RTE_FLOW_ACTION_TYPE_DROP:
|
|
|
|
ret = mlx5_flow_action_drop(actions, flow, remain,
|
|
|
|
error);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_ACTION,
|
|
|
|
actions,
|
|
|
|
"action not supported");
|
|
|
|
}
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
if (remain > ret)
|
|
|
|
remain -= ret;
|
|
|
|
else
|
|
|
|
remain = 0;
|
|
|
|
size += ret;
|
|
|
|
}
|
|
|
|
if (!flow->fate)
|
|
|
|
return rte_flow_error_set(error, ENOTSUP,
|
|
|
|
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
|
|
|
|
NULL,
|
|
|
|
"no fate action found");
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
 * after ensuring the NIC will understand and process it correctly.
 * The conversion is only performed item/action per item/action, each of
 * them is written into the @p flow if its size is lesser or equal to @p
 * flow_size.
 * Validation and memory consumption computation are still performed until the
 * end, unless an error is encountered.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[in] attributes
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is lesser or equal to @p flow_size, the flow has fully been converted and
 *   can be applied, otherwise another call with this returned memory size
 *   should be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
		const size_t flow_size,
		const struct rte_flow_attr *attributes,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_flow local_flow = { .layers = 0, };
	/* The flow struct and the Verbs attribute header always come first. */
	size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
	int remain = (flow_size > size) ? flow_size - size : 0;
	int ret;

	/*
	 * Dry-run mode: with no room left, validate against a stack scratch
	 * flow so the caller can size the real allocation from the returned
	 * byte count (designated initializer zeroes the other fields).
	 */
	if (!remain)
		flow = &local_flow;
	ret = mlx5_flow_attributes(dev, attributes, flow, error);
	if (ret < 0)
		return ret;
	ret = mlx5_flow_items(pattern, flow, remain, error);
	if (ret < 0)
		return ret;
	size += ret;
	remain = (flow_size > size) ? flow_size - size : 0;
	ret = mlx5_flow_actions(dev, actions, flow, remain, error);
	if (ret < 0)
		return ret;
	size += ret;
	/* Only touch the Verbs attribute when it was actually written. */
	if (size <= flow_size)
		flow->verbs.attr->priority = flow->attributes.priority;
	return size;
}
|
|
|
|
|
2017-10-09 14:44:58 +00:00
|
|
|
/**
|
|
|
|
* Validate a flow supported by the NIC.
|
|
|
|
*
|
|
|
|
* @see rte_flow_validate()
|
|
|
|
* @see rte_flow_ops
|
|
|
|
*/
|
|
|
|
int
|
2018-07-12 09:30:50 +00:00
|
|
|
mlx5_flow_validate(struct rte_eth_dev *dev,
|
|
|
|
const struct rte_flow_attr *attr,
|
|
|
|
const struct rte_flow_item items[],
|
|
|
|
const struct rte_flow_action actions[],
|
2017-10-09 14:44:58 +00:00
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
2018-07-12 09:30:50 +00:00
|
|
|
int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
|
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Remove the flow.
|
|
|
|
*
|
|
|
|
* @param[in] dev
|
|
|
|
* Pointer to Ethernet device.
|
|
|
|
* @param[in, out] flow
|
|
|
|
* Pointer to flow structure.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
|
|
|
|
{
|
|
|
|
if (flow->fate & MLX5_FLOW_FATE_DROP) {
|
|
|
|
if (flow->verbs.flow) {
|
|
|
|
claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
|
|
|
|
flow->verbs.flow = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (flow->verbs.hrxq) {
|
|
|
|
mlx5_hrxq_drop_release(dev);
|
|
|
|
flow->verbs.hrxq = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Apply the flow.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
	if (!flow->verbs.hrxq)
		/* errno is read before any further call can clobber it. */
		return rte_flow_error_set
			(error, errno,
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL,
			 "cannot allocate Drop queue");
	flow->verbs.flow =
		mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
	if (!flow->verbs.flow) {
		/* Roll back the drop queue reference taken above.
		 * NOTE(review): assumes mlx5_hrxq_drop_release() preserves
		 * errno from the failed create_flow() — confirm.
		 */
		mlx5_hrxq_drop_release(dev);
		flow->verbs.hrxq = NULL;
		return rte_flow_error_set(error, errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "kernel module refuses to create"
					  " flow");
	}
	return 0;
}
|
|
|
|
|
|
|
|
/**
 * Create a flow and add it to @p list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow *
mlx5_flow_list_create(struct rte_eth_dev *dev,
		      struct mlx5_flows *list,
		      const struct rte_flow_attr *attr,
		      const struct rte_flow_item items[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow *flow;
	size_t size;
	int ret;

	/* First pass: dry run to validate and compute the required size. */
	ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
	if (ret < 0)
		return NULL;
	size = ret;
	flow = rte_zmalloc(__func__, size, 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate memory");
		return NULL;
	}
	/* Layout: struct rte_flow, then ibv_flow_attr, then the specs. */
	flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
	/* Second pass: same conversion, this time written into the buffer. */
	ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
	if (ret < 0)
		goto error;
	/* Both passes must agree on the size. */
	assert((size_t)ret == size);
	if (dev->data->dev_started) {
		ret = mlx5_flow_apply(dev, flow, error);
		if (ret < 0)
			goto error;
	}
	TAILQ_INSERT_TAIL(list, flow, next);
	return flow;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_remove(dev, flow);
	rte_free(flow);
	rte_errno = ret; /* Restore rte_errno. */
	return NULL;
}
|
|
|
|
|
2016-12-29 15:15:17 +00:00
|
|
|
/**
|
|
|
|
* Create a flow.
|
|
|
|
*
|
|
|
|
* @see rte_flow_create()
|
|
|
|
* @see rte_flow_ops
|
|
|
|
*/
|
|
|
|
struct rte_flow *
|
2018-07-12 09:30:50 +00:00
|
|
|
mlx5_flow_create(struct rte_eth_dev *dev,
|
|
|
|
const struct rte_flow_attr *attr,
|
|
|
|
const struct rte_flow_item items[],
|
|
|
|
const struct rte_flow_action actions[],
|
2016-12-29 15:15:17 +00:00
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
2018-07-12 09:30:50 +00:00
|
|
|
return mlx5_flow_list_create
|
|
|
|
(dev, &((struct priv *)dev->data->dev_private)->flows,
|
|
|
|
attr, items, actions, error);
|
2016-12-29 15:15:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
		       struct rte_flow *flow)
{
	/* Release device resources before unlinking and freeing the flow. */
	mlx5_flow_remove(dev, flow);
	TAILQ_REMOVE(list, flow, next);
	rte_free(flow);
}
|
|
|
|
|
2016-12-29 15:15:18 +00:00
|
|
|
/**
|
|
|
|
* Destroy all flows.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:44:53 +00:00
|
|
|
* @param list
|
|
|
|
* Pointer to a TAILQ flow list.
|
2016-12-29 15:15:18 +00:00
|
|
|
*/
|
2017-10-09 14:44:53 +00:00
|
|
|
void
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
|
2016-12-29 15:15:18 +00:00
|
|
|
{
|
2017-10-09 14:44:53 +00:00
|
|
|
while (!TAILQ_EMPTY(list)) {
|
2016-12-29 15:15:18 +00:00
|
|
|
struct rte_flow *flow;
|
|
|
|
|
2017-10-09 14:44:53 +00:00
|
|
|
flow = TAILQ_FIRST(list);
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_flow_list_destroy(dev, list, flow);
|
2016-12-29 15:15:18 +00:00
|
|
|
}
|
2016-12-29 15:15:17 +00:00
|
|
|
}
|
|
|
|
|
2016-12-29 15:15:18 +00:00
|
|
|
/**
|
|
|
|
* Remove all flows.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:44:53 +00:00
|
|
|
* @param list
|
|
|
|
* Pointer to a TAILQ flow list.
|
2016-12-29 15:15:18 +00:00
|
|
|
*/
|
|
|
|
void
|
2018-07-12 09:30:47 +00:00
|
|
|
mlx5_flow_stop(struct rte_eth_dev *dev __rte_unused,
|
|
|
|
struct mlx5_flows *list __rte_unused)
|
2016-12-29 15:15:18 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Add all flows.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:44:53 +00:00
|
|
|
* @param list
|
|
|
|
* Pointer to a TAILQ flow list.
|
2016-12-29 15:15:18 +00:00
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2016-12-29 15:15:18 +00:00
|
|
|
*/
|
|
|
|
int
|
2018-07-12 09:30:47 +00:00
|
|
|
mlx5_flow_start(struct rte_eth_dev *dev __rte_unused,
|
|
|
|
struct mlx5_flows *list __rte_unused)
|
2016-12-29 15:15:18 +00:00
|
|
|
{
|
2017-04-11 15:21:52 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2017-05-24 13:44:08 +00:00
|
|
|
|
2017-10-09 14:44:42 +00:00
|
|
|
/**
|
|
|
|
* Verify the flow list is empty
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:44:42 +00:00
|
|
|
*
|
|
|
|
* @return the number of flows not released.
|
|
|
|
*/
|
|
|
|
int
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_flow_verify(struct rte_eth_dev *dev)
|
2017-10-09 14:44:42 +00:00
|
|
|
{
|
2018-03-05 12:21:04 +00:00
|
|
|
struct priv *priv = dev->data->dev_private;
|
2017-10-09 14:44:42 +00:00
|
|
|
struct rte_flow *flow;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(flow, &priv->flows, next) {
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(DEBUG, "port %u flow %p still referenced",
|
|
|
|
dev->data->port_id, (void *)flow);
|
2017-10-09 14:44:42 +00:00
|
|
|
++ret;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2017-10-09 14:44:53 +00:00
|
|
|
|
|
|
|
/**
|
2017-10-09 14:44:55 +00:00
|
|
|
* Enable a control flow configured from the control plane.
|
2017-10-09 14:44:53 +00:00
|
|
|
*
|
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:44:55 +00:00
|
|
|
* @param eth_spec
|
2017-10-09 14:44:53 +00:00
|
|
|
* An Ethernet flow spec to apply.
|
2017-10-09 14:44:55 +00:00
|
|
|
* @param eth_mask
|
2017-10-09 14:44:53 +00:00
|
|
|
* An Ethernet flow mask to apply.
|
2017-10-09 14:44:55 +00:00
|
|
|
* @param vlan_spec
|
|
|
|
* A VLAN flow spec to apply.
|
|
|
|
* @param vlan_mask
|
|
|
|
* A VLAN flow mask to apply.
|
2017-10-09 14:44:53 +00:00
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2017-10-09 14:44:53 +00:00
|
|
|
*/
|
|
|
|
int
|
2017-10-09 14:44:55 +00:00
|
|
|
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
|
|
|
|
struct rte_flow_item_eth *eth_spec,
|
|
|
|
struct rte_flow_item_eth *eth_mask,
|
|
|
|
struct rte_flow_item_vlan *vlan_spec,
|
|
|
|
struct rte_flow_item_vlan *vlan_mask)
|
2017-10-09 14:44:53 +00:00
|
|
|
{
|
|
|
|
struct priv *priv = dev->data->dev_private;
|
|
|
|
const struct rte_flow_attr attr = {
|
|
|
|
.ingress = 1,
|
2018-07-12 09:30:49 +00:00
|
|
|
.priority = priv->config.flow_prio - 1,
|
2017-10-09 14:44:53 +00:00
|
|
|
};
|
|
|
|
struct rte_flow_item items[] = {
|
|
|
|
{
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_ETH,
|
2017-10-09 14:44:55 +00:00
|
|
|
.spec = eth_spec,
|
|
|
|
.last = NULL,
|
|
|
|
.mask = eth_mask,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
|
|
|
|
RTE_FLOW_ITEM_TYPE_END,
|
|
|
|
.spec = vlan_spec,
|
2017-10-09 14:44:53 +00:00
|
|
|
.last = NULL,
|
2017-10-09 14:44:55 +00:00
|
|
|
.mask = vlan_mask,
|
2017-10-09 14:44:53 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_END,
|
|
|
|
},
|
|
|
|
};
|
2018-04-25 15:27:48 +00:00
|
|
|
uint16_t queue[priv->reta_idx_n];
|
|
|
|
struct rte_flow_action_rss action_rss = {
|
2018-04-25 15:27:52 +00:00
|
|
|
.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
|
2018-04-25 15:27:54 +00:00
|
|
|
.level = 0,
|
2018-04-25 15:27:50 +00:00
|
|
|
.types = priv->rss_conf.rss_hf,
|
|
|
|
.key_len = priv->rss_conf.rss_key_len,
|
|
|
|
.queue_num = priv->reta_idx_n,
|
|
|
|
.key = priv->rss_conf.rss_key,
|
2018-04-25 15:27:48 +00:00
|
|
|
.queue = queue,
|
|
|
|
};
|
2017-10-09 14:44:53 +00:00
|
|
|
struct rte_flow_action actions[] = {
|
|
|
|
{
|
2017-10-09 14:45:04 +00:00
|
|
|
.type = RTE_FLOW_ACTION_TYPE_RSS,
|
2018-04-25 15:27:48 +00:00
|
|
|
.conf = &action_rss,
|
2017-10-09 14:44:53 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.type = RTE_FLOW_ACTION_TYPE_END,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
struct rte_flow *flow;
|
|
|
|
struct rte_flow_error error;
|
2017-10-09 14:45:04 +00:00
|
|
|
unsigned int i;
|
|
|
|
|
2018-03-05 12:21:06 +00:00
|
|
|
if (!priv->reta_idx_n) {
|
|
|
|
rte_errno = EINVAL;
|
|
|
|
return -rte_errno;
|
|
|
|
}
|
2017-10-09 14:45:04 +00:00
|
|
|
for (i = 0; i != priv->reta_idx_n; ++i)
|
2018-04-25 15:27:48 +00:00
|
|
|
queue[i] = (*priv->reta_idx)[i];
|
2018-03-05 12:21:04 +00:00
|
|
|
flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
|
|
|
|
actions, &error);
|
2017-10-09 14:44:55 +00:00
|
|
|
if (!flow)
|
2018-03-05 12:21:06 +00:00
|
|
|
return -rte_errno;
|
2017-10-09 14:44:53 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2017-10-09 14:44:55 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Enable a flow control configured from the control plane.
|
|
|
|
*
|
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
|
|
|
* @param eth_spec
|
|
|
|
* An Ethernet flow spec to apply.
|
|
|
|
* @param eth_mask
|
|
|
|
* An Ethernet flow mask to apply.
|
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2017-10-09 14:44:55 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
mlx5_ctrl_flow(struct rte_eth_dev *dev,
|
|
|
|
struct rte_flow_item_eth *eth_spec,
|
|
|
|
struct rte_flow_item_eth *eth_mask)
|
|
|
|
{
|
|
|
|
return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
|
|
|
|
}
|
2017-10-09 14:45:02 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Destroy a flow.
|
|
|
|
*
|
|
|
|
* @see rte_flow_destroy()
|
|
|
|
* @see rte_flow_ops
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
mlx5_flow_destroy(struct rte_eth_dev *dev,
|
|
|
|
struct rte_flow *flow,
|
2018-03-05 12:20:59 +00:00
|
|
|
struct rte_flow_error *error __rte_unused)
|
2017-10-09 14:45:02 +00:00
|
|
|
{
|
|
|
|
struct priv *priv = dev->data->dev_private;
|
|
|
|
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_flow_list_destroy(dev, &priv->flows, flow);
|
2017-10-09 14:45:02 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Destroy all flows.
|
|
|
|
*
|
|
|
|
* @see rte_flow_flush()
|
|
|
|
* @see rte_flow_ops
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
mlx5_flow_flush(struct rte_eth_dev *dev,
|
2018-03-05 12:20:59 +00:00
|
|
|
struct rte_flow_error *error __rte_unused)
|
2017-10-09 14:45:02 +00:00
|
|
|
{
|
|
|
|
struct priv *priv = dev->data->dev_private;
|
|
|
|
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_flow_list_flush(dev, &priv->flows);
|
2017-10-09 14:45:02 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Isolated mode.
|
|
|
|
*
|
|
|
|
* @see rte_flow_isolate()
|
|
|
|
* @see rte_flow_ops
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
mlx5_flow_isolate(struct rte_eth_dev *dev,
|
|
|
|
int enable,
|
|
|
|
struct rte_flow_error *error)
|
|
|
|
{
|
|
|
|
struct priv *priv = dev->data->dev_private;
|
|
|
|
|
|
|
|
if (dev->data->dev_started) {
|
|
|
|
rte_flow_error_set(error, EBUSY,
|
|
|
|
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
|
|
|
|
NULL,
|
|
|
|
"port must be stopped first");
|
|
|
|
return -rte_errno;
|
|
|
|
}
|
|
|
|
priv->isolated = !!enable;
|
2017-10-09 14:45:06 +00:00
|
|
|
if (enable)
|
2018-05-09 11:04:50 +00:00
|
|
|
dev->dev_ops = &mlx5_dev_ops_isolate;
|
2017-10-09 14:45:06 +00:00
|
|
|
else
|
2018-05-09 11:04:50 +00:00
|
|
|
dev->dev_ops = &mlx5_dev_ops;
|
2017-10-09 14:45:02 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2017-10-09 14:45:05 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Convert a flow director filter to a generic flow.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:45:05 +00:00
|
|
|
* @param fdir_filter
|
|
|
|
* Flow director filter to add.
|
|
|
|
* @param attributes
|
|
|
|
* Generic flow parameters structure.
|
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2017-10-09 14:45:05 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
|
2017-10-09 14:45:05 +00:00
|
|
|
const struct rte_eth_fdir_filter *fdir_filter,
|
|
|
|
struct mlx5_fdir *attributes)
|
|
|
|
{
|
2018-03-05 12:21:04 +00:00
|
|
|
struct priv *priv = dev->data->dev_private;
|
2017-10-09 14:45:05 +00:00
|
|
|
const struct rte_eth_fdir_input *input = &fdir_filter->input;
|
2018-04-17 09:01:36 +00:00
|
|
|
const struct rte_eth_fdir_masks *mask =
|
|
|
|
&dev->data->dev_conf.fdir_conf.mask;
|
2017-10-09 14:45:05 +00:00
|
|
|
|
|
|
|
/* Validate queue number. */
|
|
|
|
if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(ERR, "port %u invalid queue number %d",
|
|
|
|
dev->data->port_id, fdir_filter->action.rx_queue);
|
2018-03-05 12:21:06 +00:00
|
|
|
rte_errno = EINVAL;
|
|
|
|
return -rte_errno;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
|
|
|
attributes->attr.ingress = 1;
|
|
|
|
attributes->items[0] = (struct rte_flow_item) {
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_ETH,
|
|
|
|
.spec = &attributes->l2,
|
2017-10-27 06:50:00 +00:00
|
|
|
.mask = &attributes->l2_mask,
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
2017-10-24 15:18:15 +00:00
|
|
|
switch (fdir_filter->action.behavior) {
|
|
|
|
case RTE_ETH_FDIR_ACCEPT:
|
|
|
|
attributes->actions[0] = (struct rte_flow_action){
|
|
|
|
.type = RTE_FLOW_ACTION_TYPE_QUEUE,
|
|
|
|
.conf = &attributes->queue,
|
|
|
|
};
|
|
|
|
break;
|
|
|
|
case RTE_ETH_FDIR_REJECT:
|
|
|
|
attributes->actions[0] = (struct rte_flow_action){
|
|
|
|
.type = RTE_FLOW_ACTION_TYPE_DROP,
|
|
|
|
};
|
|
|
|
break;
|
|
|
|
default:
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(ERR, "port %u invalid behavior %d",
|
|
|
|
dev->data->port_id,
|
|
|
|
fdir_filter->action.behavior);
|
2018-03-05 12:21:06 +00:00
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return -rte_errno;
|
2017-10-24 15:18:15 +00:00
|
|
|
}
|
2017-10-09 14:45:05 +00:00
|
|
|
attributes->queue.index = fdir_filter->action.rx_queue;
|
2018-04-17 09:01:35 +00:00
|
|
|
/* Handle L3. */
|
2017-10-09 14:45:05 +00:00
|
|
|
switch (fdir_filter->input.flow_type) {
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
|
|
|
|
attributes->l3.ipv4.hdr = (struct ipv4_hdr){
|
|
|
|
.src_addr = input->flow.ip4_flow.src_ip,
|
|
|
|
.dst_addr = input->flow.ip4_flow.dst_ip,
|
|
|
|
.time_to_live = input->flow.ip4_flow.ttl,
|
|
|
|
.type_of_service = input->flow.ip4_flow.tos,
|
|
|
|
.next_proto_id = input->flow.ip4_flow.proto,
|
|
|
|
};
|
2018-04-17 09:01:36 +00:00
|
|
|
attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
|
|
|
|
.src_addr = mask->ipv4_mask.src_ip,
|
|
|
|
.dst_addr = mask->ipv4_mask.dst_ip,
|
|
|
|
.time_to_live = mask->ipv4_mask.ttl,
|
|
|
|
.type_of_service = mask->ipv4_mask.tos,
|
|
|
|
.next_proto_id = mask->ipv4_mask.proto,
|
|
|
|
};
|
2017-10-09 14:45:05 +00:00
|
|
|
attributes->items[1] = (struct rte_flow_item){
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_IPV4,
|
|
|
|
.spec = &attributes->l3,
|
2018-04-17 09:01:36 +00:00
|
|
|
.mask = &attributes->l3_mask,
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
|
|
|
break;
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
|
2018-04-17 09:01:35 +00:00
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
|
2017-10-09 14:45:05 +00:00
|
|
|
attributes->l3.ipv6.hdr = (struct ipv6_hdr){
|
2018-04-17 09:01:36 +00:00
|
|
|
.hop_limits = input->flow.ipv6_flow.hop_limits,
|
|
|
|
.proto = input->flow.ipv6_flow.proto,
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
2018-04-17 09:01:36 +00:00
|
|
|
|
2017-10-09 14:45:05 +00:00
|
|
|
memcpy(attributes->l3.ipv6.hdr.src_addr,
|
2018-04-17 09:01:35 +00:00
|
|
|
input->flow.ipv6_flow.src_ip,
|
2017-10-09 14:45:05 +00:00
|
|
|
RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
|
|
|
|
memcpy(attributes->l3.ipv6.hdr.dst_addr,
|
2018-04-17 09:01:35 +00:00
|
|
|
input->flow.ipv6_flow.dst_ip,
|
2017-10-09 14:45:05 +00:00
|
|
|
RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
|
2018-04-17 09:01:36 +00:00
|
|
|
memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
|
|
|
|
mask->ipv6_mask.src_ip,
|
|
|
|
RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
|
|
|
|
memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
|
|
|
|
mask->ipv6_mask.dst_ip,
|
|
|
|
RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
|
2017-10-09 14:45:05 +00:00
|
|
|
attributes->items[1] = (struct rte_flow_item){
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_IPV6,
|
|
|
|
.spec = &attributes->l3,
|
2018-04-17 09:01:36 +00:00
|
|
|
.mask = &attributes->l3_mask,
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
2018-04-17 09:01:35 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
DRV_LOG(ERR, "port %u invalid flow type%d",
|
|
|
|
dev->data->port_id, fdir_filter->input.flow_type);
|
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return -rte_errno;
|
|
|
|
}
|
|
|
|
/* Handle L4. */
|
|
|
|
switch (fdir_filter->input.flow_type) {
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
|
|
|
|
attributes->l4.udp.hdr = (struct udp_hdr){
|
|
|
|
.src_port = input->flow.udp4_flow.src_port,
|
|
|
|
.dst_port = input->flow.udp4_flow.dst_port,
|
|
|
|
};
|
2018-04-17 09:01:36 +00:00
|
|
|
attributes->l4_mask.udp.hdr = (struct udp_hdr){
|
|
|
|
.src_port = mask->src_port_mask,
|
|
|
|
.dst_port = mask->dst_port_mask,
|
|
|
|
};
|
2017-10-09 14:45:05 +00:00
|
|
|
attributes->items[2] = (struct rte_flow_item){
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_UDP,
|
|
|
|
.spec = &attributes->l4,
|
2018-04-17 09:01:36 +00:00
|
|
|
.mask = &attributes->l4_mask,
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
|
|
|
break;
|
2018-04-17 09:01:35 +00:00
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
|
|
|
|
attributes->l4.tcp.hdr = (struct tcp_hdr){
|
|
|
|
.src_port = input->flow.tcp4_flow.src_port,
|
|
|
|
.dst_port = input->flow.tcp4_flow.dst_port,
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
2018-04-17 09:01:36 +00:00
|
|
|
attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
|
|
|
|
.src_port = mask->src_port_mask,
|
|
|
|
.dst_port = mask->dst_port_mask,
|
|
|
|
};
|
2018-04-17 09:01:35 +00:00
|
|
|
attributes->items[2] = (struct rte_flow_item){
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_TCP,
|
|
|
|
.spec = &attributes->l4,
|
2018-04-17 09:01:36 +00:00
|
|
|
.mask = &attributes->l4_mask,
|
2018-04-17 09:01:35 +00:00
|
|
|
};
|
|
|
|
break;
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
|
|
|
|
attributes->l4.udp.hdr = (struct udp_hdr){
|
|
|
|
.src_port = input->flow.udp6_flow.src_port,
|
|
|
|
.dst_port = input->flow.udp6_flow.dst_port,
|
|
|
|
};
|
2018-04-17 09:01:36 +00:00
|
|
|
attributes->l4_mask.udp.hdr = (struct udp_hdr){
|
|
|
|
.src_port = mask->src_port_mask,
|
|
|
|
.dst_port = mask->dst_port_mask,
|
|
|
|
};
|
2018-04-17 09:01:35 +00:00
|
|
|
attributes->items[2] = (struct rte_flow_item){
|
|
|
|
.type = RTE_FLOW_ITEM_TYPE_UDP,
|
|
|
|
.spec = &attributes->l4,
|
2018-04-17 09:01:36 +00:00
|
|
|
.mask = &attributes->l4_mask,
|
2018-04-17 09:01:35 +00:00
|
|
|
};
|
|
|
|
break;
|
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
|
2017-10-09 14:45:05 +00:00
|
|
|
attributes->l4.tcp.hdr = (struct tcp_hdr){
|
|
|
|
.src_port = input->flow.tcp6_flow.src_port,
|
|
|
|
.dst_port = input->flow.tcp6_flow.dst_port,
|
|
|
|
};
|
2018-04-17 09:01:36 +00:00
|
|
|
attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
|
|
|
|
.src_port = mask->src_port_mask,
|
|
|
|
.dst_port = mask->dst_port_mask,
|
|
|
|
};
|
2017-10-09 14:45:05 +00:00
|
|
|
attributes->items[2] = (struct rte_flow_item){
|
2017-10-31 15:51:14 +00:00
|
|
|
.type = RTE_FLOW_ITEM_TYPE_TCP,
|
2017-10-09 14:45:05 +00:00
|
|
|
.spec = &attributes->l4,
|
2018-04-17 09:01:36 +00:00
|
|
|
.mask = &attributes->l4_mask,
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
|
|
|
break;
|
2018-04-17 09:01:35 +00:00
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
|
2017-10-09 14:45:05 +00:00
|
|
|
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
|
|
|
|
break;
|
|
|
|
default:
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(ERR, "port %u invalid flow type%d",
|
|
|
|
dev->data->port_id, fdir_filter->input.flow_type);
|
2018-03-05 12:21:06 +00:00
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return -rte_errno;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Add new flow director filter and store it in list.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:45:05 +00:00
|
|
|
* @param fdir_filter
|
|
|
|
* Flow director filter to add.
|
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2017-10-09 14:45:05 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_fdir_filter_add(struct rte_eth_dev *dev,
|
2017-10-09 14:45:05 +00:00
|
|
|
const struct rte_eth_fdir_filter *fdir_filter)
|
|
|
|
{
|
2018-03-05 12:21:04 +00:00
|
|
|
struct priv *priv = dev->data->dev_private;
|
2017-10-09 14:45:05 +00:00
|
|
|
struct mlx5_fdir attributes = {
|
|
|
|
.attr.group = 0,
|
2017-10-27 06:50:00 +00:00
|
|
|
.l2_mask = {
|
|
|
|
.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
|
|
|
|
.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
|
|
|
|
.type = 0,
|
|
|
|
},
|
2017-10-09 14:45:05 +00:00
|
|
|
};
|
|
|
|
struct rte_flow_error error;
|
|
|
|
struct rte_flow *flow;
|
|
|
|
int ret;
|
|
|
|
|
2018-03-05 12:21:04 +00:00
|
|
|
ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
|
2017-10-09 14:45:05 +00:00
|
|
|
if (ret)
|
2018-03-05 12:21:06 +00:00
|
|
|
return ret;
|
2018-03-05 12:21:04 +00:00
|
|
|
flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
|
|
|
|
attributes.items, attributes.actions,
|
|
|
|
&error);
|
2017-10-09 14:45:05 +00:00
|
|
|
if (flow) {
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
|
|
|
|
(void *)flow);
|
2017-10-09 14:45:05 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2018-03-05 12:21:06 +00:00
|
|
|
return -rte_errno;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Delete specific filter.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:45:05 +00:00
|
|
|
* @param fdir_filter
|
|
|
|
* Filter to be deleted.
|
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2017-10-09 14:45:05 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-07-12 09:30:47 +00:00
|
|
|
mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
|
|
|
|
const struct rte_eth_fdir_filter *fdir_filter
|
|
|
|
__rte_unused)
|
2017-10-09 14:45:05 +00:00
|
|
|
{
|
2018-07-12 09:30:47 +00:00
|
|
|
rte_errno = ENOTSUP;
|
2018-03-05 12:21:06 +00:00
|
|
|
return -rte_errno;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Update queue for specific filter.
 *
 * Implemented as delete followed by add.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	int ret = mlx5_fdir_filter_delete(dev, fdir_filter);

	if (ret)
		return ret;
	return mlx5_fdir_filter_add(dev, fdir_filter);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Flush all filters.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:45:05 +00:00
|
|
|
*/
|
|
|
|
static void
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
|
2017-10-09 14:45:05 +00:00
|
|
|
{
|
2018-03-05 12:21:04 +00:00
|
|
|
struct priv *priv = dev->data->dev_private;
|
|
|
|
|
|
|
|
mlx5_flow_list_flush(dev, &priv->flows);
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get flow director information.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:45:05 +00:00
|
|
|
* @param[out] fdir_info
|
|
|
|
* Resulting flow director information.
|
|
|
|
*/
|
|
|
|
static void
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
|
2017-10-09 14:45:05 +00:00
|
|
|
{
|
|
|
|
struct rte_eth_fdir_masks *mask =
|
2018-05-09 11:04:50 +00:00
|
|
|
&dev->data->dev_conf.fdir_conf.mask;
|
2017-10-09 14:45:05 +00:00
|
|
|
|
2018-05-09 11:04:50 +00:00
|
|
|
fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
|
2017-10-09 14:45:05 +00:00
|
|
|
fdir_info->guarant_spc = 0;
|
|
|
|
rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
|
|
|
|
fdir_info->max_flexpayload = 0;
|
|
|
|
fdir_info->flow_types_mask[0] = 0;
|
|
|
|
fdir_info->flex_payload_unit = 0;
|
|
|
|
fdir_info->max_flex_payload_segment_num = 0;
|
|
|
|
fdir_info->flex_payload_limit = 0;
|
|
|
|
memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Deal with flow director operations.
|
|
|
|
*
|
2018-03-05 12:21:04 +00:00
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device.
|
2017-10-09 14:45:05 +00:00
|
|
|
* @param filter_op
|
|
|
|
* Operation to perform.
|
|
|
|
* @param arg
|
|
|
|
* Pointer to operation-specific structure.
|
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2017-10-09 14:45:05 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
|
|
|
|
void *arg)
|
2017-10-09 14:45:05 +00:00
|
|
|
{
|
|
|
|
enum rte_fdir_mode fdir_mode =
|
2018-05-09 11:04:50 +00:00
|
|
|
dev->data->dev_conf.fdir_conf.mode;
|
2017-10-09 14:45:05 +00:00
|
|
|
|
|
|
|
if (filter_op == RTE_ETH_FILTER_NOP)
|
|
|
|
return 0;
|
|
|
|
if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
|
|
|
|
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(ERR, "port %u flow director mode %d not supported",
|
|
|
|
dev->data->port_id, fdir_mode);
|
2018-03-05 12:21:06 +00:00
|
|
|
rte_errno = EINVAL;
|
|
|
|
return -rte_errno;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
|
|
|
switch (filter_op) {
|
|
|
|
case RTE_ETH_FILTER_ADD:
|
2018-03-05 12:21:06 +00:00
|
|
|
return mlx5_fdir_filter_add(dev, arg);
|
2017-10-09 14:45:05 +00:00
|
|
|
case RTE_ETH_FILTER_UPDATE:
|
2018-03-05 12:21:06 +00:00
|
|
|
return mlx5_fdir_filter_update(dev, arg);
|
2017-10-09 14:45:05 +00:00
|
|
|
case RTE_ETH_FILTER_DELETE:
|
2018-03-05 12:21:06 +00:00
|
|
|
return mlx5_fdir_filter_delete(dev, arg);
|
2017-10-09 14:45:05 +00:00
|
|
|
case RTE_ETH_FILTER_FLUSH:
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_fdir_filter_flush(dev);
|
2017-10-09 14:45:05 +00:00
|
|
|
break;
|
|
|
|
case RTE_ETH_FILTER_INFO:
|
2018-03-05 12:21:04 +00:00
|
|
|
mlx5_fdir_info_get(dev, arg);
|
2017-10-09 14:45:05 +00:00
|
|
|
break;
|
|
|
|
default:
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(DEBUG, "port %u unknown operation %u",
|
|
|
|
dev->data->port_id, filter_op);
|
2018-03-05 12:21:06 +00:00
|
|
|
rte_errno = EINVAL;
|
|
|
|
return -rte_errno;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
2018-03-05 12:21:06 +00:00
|
|
|
return 0;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Manage filter operations.
|
|
|
|
*
|
|
|
|
* @param dev
|
|
|
|
* Pointer to Ethernet device structure.
|
|
|
|
* @param filter_type
|
|
|
|
* Filter type.
|
|
|
|
* @param filter_op
|
|
|
|
* Operation to perform.
|
|
|
|
* @param arg
|
|
|
|
* Pointer to operation-specific structure.
|
|
|
|
*
|
|
|
|
* @return
|
2018-03-05 12:21:06 +00:00
|
|
|
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
2017-10-09 14:45:05 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
|
|
|
|
enum rte_filter_type filter_type,
|
|
|
|
enum rte_filter_op filter_op,
|
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
switch (filter_type) {
|
|
|
|
case RTE_ETH_FILTER_GENERIC:
|
2018-03-05 12:21:06 +00:00
|
|
|
if (filter_op != RTE_ETH_FILTER_GET) {
|
|
|
|
rte_errno = EINVAL;
|
|
|
|
return -rte_errno;
|
|
|
|
}
|
2017-10-09 14:45:05 +00:00
|
|
|
*(const void **)arg = &mlx5_flow_ops;
|
|
|
|
return 0;
|
|
|
|
case RTE_ETH_FILTER_FDIR:
|
2018-03-05 12:21:06 +00:00
|
|
|
return mlx5_fdir_ctrl_func(dev, filter_op, arg);
|
2017-10-09 14:45:05 +00:00
|
|
|
default:
|
2018-03-13 09:23:56 +00:00
|
|
|
DRV_LOG(ERR, "port %u filter type (%d) not supported",
|
|
|
|
dev->data->port_id, filter_type);
|
2018-03-05 12:21:06 +00:00
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return -rte_errno;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|
2018-03-05 12:21:06 +00:00
|
|
|
return 0;
|
2017-10-09 14:45:05 +00:00
|
|
|
}
|