/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_switch.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
	sfc_flow_query_cb_t	*query;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
	.query = NULL,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
	.query = sfc_mae_flow_query,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}
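
/*
 * The ops above are selected once per flow: sfc_flow_parse_attr() sets
 * spec->type (filter-based or MAE-based) from the flow rule attributes,
 * and subsequent parse/insert/remove/query calls are dispatched through
 * the table returned here.
 */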

/*
 * Currently, filter-based (VNIC) flow API is implemented in such a manner
 * that each flow rule is converted to one or more hardware filters.
 * All elements of flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then a handful
 * of filter copies will be created to cover all possible values
 * of such a field.
 */
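
/*
 * Illustration (not an exhaustive description): if a rule does not pin
 * down the destination MAC address but the chosen hardware match variant
 * needs it, a filter copy is inserted per possible value of the missing
 * field, e.g. one copy for unknown unicast and one for unknown multicast
 * destinations (see sfc_flow_set_unknown_dst_flags declared below), so a
 * single rte_flow rule may own several hardware filters.
 */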

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate item and prepare structures spec and mask for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
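
/*
 * Typical usage by the per-item parsers below (sketch; "xxx" stands for
 * a concrete item type such as eth or vlan):
 *
 *	const struct rte_flow_item_xxx *spec = NULL;
 *	const struct rte_flow_item_xxx *mask = NULL;
 *
 *	rc = sfc_flow_parse_init(item,
 *				 (const void **)&spec,
 *				 (const void **)&mask,
 *				 &supp_mask, &rte_flow_item_xxx_mask,
 *				 sizeof(struct rte_flow_item_xxx), error);
 *
 * On success, "spec" may still be NULL (the item then matches any value)
 * and "mask" points either to the item's own mask or to the default one.
 */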

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
			     EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
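
/*
 * Note on the inner_type handling above: in rte_flow, the VLAN item
 * carries the EtherType of the frame inside the tag, so it maps onto
 * EFX_FILTER_MATCH_ETHER_TYPE. An EtherType that was already set by a
 * preceding item would effectively request a match on the TPID of this
 * tag, which the hardware filters cannot express, hence the error.
 */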

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Outer IP header protocol must be UDP "
					   "in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Outer IP header protocol must be GRE "
					   "in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Only VxLAN/GENEVE/NVGRE tunneling patterns "
					   "are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Outer frame EtherType in pattern with tunneling "
				   "must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}
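
/*
 * The VXLAN/GENEVE/NVGRE parsers below share the two helpers above:
 * first the outer IP protocol is constrained (UDP for VXLAN/GENEVE,
 * GRE for NVGRE), then the VNI/VSID is copied into the filter if a
 * full mask is given.
 */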

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported. But protocol type can be only Ethernet (0x6558).
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "GENEVE encap. protocol must be Ethernet "
					   "(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert PPPoEx item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.name = "VOID",
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.name = "ETH",
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.name = "VLAN",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.name = "PPPOED",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.name = "PPPOES",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.name = "IPV4",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.name = "IPV6",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.name = "TCP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.name = "UDP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.name = "VXLAN",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.name = "GENEVE",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.name = "NVGRE",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};
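
/*
 * Note on the layer fields above: sfc_flow_parse_pattern() checks each
 * item's prev_layer against the layer reached so far, enforcing the
 * L2 -> L3 -> L4 stacking order. Tunnel items (VXLAN, GENEVE, NVGRE)
 * reset the layer to START_LAYER so that the encapsulated frame is
 * parsed from scratch.
 */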

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0 && attr->transfer == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0 && attr->transfer == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_ADMIN) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
		  unsigned int nb_items,
		  enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < nb_items; i++)
		if (items[i].type == type)
			return &items[i];

	return NULL;
}
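
/*
 * Walk the pattern items, look each one up in the provided item table
 * and invoke its parse callback. Omitting outer layers at the start of
 * the pattern is allowed; at most one tunnel item may be present.
 */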
int
sfc_flow_parse_pattern(struct sfc_adapter *sa,
		       const struct sfc_flow_item *flow_items,
		       unsigned int nb_flow_items,
		       const struct rte_flow_item pattern[],
		       struct sfc_flow_parse_ctx *parse_ctx,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(flow_items, nb_flow_items,
					 pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
			    is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		if (parse_ctx->type != item->ctx_type) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					"Parse context type mismatch");
			return -rte_errno;
		}

		rc = item->parse(pattern, parse_ctx, error);
		if (rc != 0) {
			sfc_err(sa, "failed to parse item %s: %s",
				item->name, strerror(-rc));
			return rc;
		}

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_rxq *rxq;
	struct sfc_rxq_info *rxq_info;

	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
		return -EINVAL;

	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
					    SFC_RXQ_FLAG_RSS_HASH);

	return 0;
}
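
/*
 * Translate an RTE_FLOW_ACTION_TYPE_RSS action into the driver RSS
 * configuration: validate the queue span, hash function, level and key,
 * then fill in the indirection table with entries relative to the
 * minimum RxQ hardware index.
 */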
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		ethdev_qid = action_rss->queue[i];

		if ((unsigned int)ethdev_qid >=
		    sfc_sa2shared(sa)->ethdev_rxq_count)
			return -EINVAL;

		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * Dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require dedicated RSS context
	 * and may be simplified to single queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	spec_filter->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		struct sfc_rxq *rxq;

		ethdev_qid = action_rss->queue[i % nb_queues];
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec_filter->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}
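
/*
 * Insert all elaborated filter specifications of the flow into the HW.
 * If the flow needs RSS, or the chosen queue requires an Rx hash,
 * allocate an RSS context first (a dummy single-queue context when only
 * the hash is required) and point every specification at it.
 */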
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	boolean_t create_context;
	unsigned int i;
	int rc = 0;

	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

	if (create_context) {
		unsigned int rss_spread;
		unsigned int rss_hash_types;
		uint8_t *rss_key;

		if (spec_filter->rss) {
			rss_spread = MIN(flow_rss->rxq_hw_index_max -
					flow_rss->rxq_hw_index_min + 1,
					EFX_MAXRSS);
			rss_hash_types = flow_rss->rss_hash_types;
			rss_key = flow_rss->rss_key;
		} else {
			/*
			 * Initialize dummy RSS context parameters to have
			 * valid RSS hash. Use default RSS hash function and
			 * key.
			 */
			rss_spread = 1;
			rss_hash_types = rss->hash_types;
			rss_key = rss->key;
		}

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss_key, sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;
	} else {
		efs_rss_context = rss->dummy_rss_context;
	}

	if (spec_filter->rss || spec_filter->rss_hash_required) {
		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
			if (spec_filter->rss)
				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (create_context) {
		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
		unsigned int *tbl;

		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;

		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  tbl, RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;

		/* Remember created dummy RSS context */
		if (!spec_filter->rss)
			rss->dummy_rss_context = efs_rss_context;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (create_context)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (spec_filter->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &spec_filter->filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}

static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t mark_max;

	mark_max = encp->enc_filter_action_mark_max;
	if (sfc_flow_tunnel_is_active(sa))
		mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);

	if (mark == NULL || mark->id > mark_max)
		return EINVAL;

	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec_filter->template.efs_mark = mark->id;

	return 0;
}
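
/*
 * Parse the action list of a filter-backed flow rule. Exactly one
 * fate-deciding action (QUEUE, RSS or DROP) is allowed, FLAG and MARK
 * must not overlap, and a rule without a fate action drops traffic.
 */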
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"flag delivery has not been negotiated");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"mark delivery has not been negotiated");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		spec_filter->template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec_filter->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following condition is met:
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same EtherType value, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		spec_filter->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec_filter->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
 * in the same specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
			    unsigned int filters_count_for_one_val,
			    struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;

	if (filters_count_for_one_val != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by outer VLAN ID");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		spec_filter->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_OUTER_VID;

		spec_filter->filters[i].efs_outer_vid = 0;
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec_filter->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Check that the list of supported filters has a filter that differs
 * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID;
 * in this case that filter will be used and the flag
 * EFX_FILTER_MATCH_OUTER_VID is not needed.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
			      __rte_unused efx_filter_spec_t *spec,
			      struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_without_vid =
		match & ~EFX_FILTER_MATCH_OUTER_VID;

	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_without_vid == filter->supported_match[i])
			return B_FALSE;
	}

	return B_TRUE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_OUTER_VID,
		.vals_count = 1,
		.set_vals = sfc_flow_set_outer_vid_flag,
		.spec_check = sfc_flow_check_outer_vid_flag,
	},
};

/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications, set match flag and values
 * of the field that corresponds to it.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec_filter->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filters specifications */
	for (i = spec_filter->count; i < new_filters_count; i++) {
		spec_filter->filters[i] =
			spec_filter->filters[i - spec_filter->count];
	}

	filters_count_for_one_val = spec_filter->count;
	spec_filter->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags that are missing from the spec but required by the
 *   supported filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copy or 0, if the flags cannot be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	unsigned int multiplier = 1;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			if (check != NULL && (!check(match, spec, filter)))
				continue;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}

/**
 * Attempt to supplement the specification template to the minimally
 * supported set of match flags. To do this, it is necessary to copy
 * the specifications, filling them with the values of fields that
 * correspond to the missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	match = spec_filter->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec_filter->template, filter);
			if (multiplier > 0) {
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

/**
 * Check that set of match flags is referred to by a filter. Filter is
 * described by match flags with the ability to add OUTER_VID and INNER_VID
 * flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
			    efx_filter_match_flags_t flags_pattern)
{
	if ((match_flags & flags_pattern) != flags_pattern)
		return B_FALSE;

	switch (match_flags & ~flags_pattern) {
	case 0:
	case EFX_FILTER_MATCH_OUTER_VID:
	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
		return B_TRUE;
	default:
		return B_FALSE;
	}
}

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param filter[in]
 *   SFC filter with list of supported filters.
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
				  struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	for (i = 0; i < spec_filter->count; i++) {
		match_flags = spec_filter->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec_filter->filters[i].efs_ether_type;
			if (filter->supports_ip_proto_or_addr_filter &&
			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
			     ether_type == EFX_ETHER_TYPE_IPV6))
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec_filter->filters[i].efs_ip_proto;
			if (filter->supports_rem_or_local_port_filter &&
			    (ip_proto == EFX_IPPROTO_TCP ||
			     ip_proto == EFX_IPPROTO_UDP))
				return B_TRUE;
		}
	}

	return B_FALSE;
}

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	spec_filter->filters[0] = *spec_tmpl;
	spec_filter->count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}
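
/*
 * Parse an rte_flow rule into a filter-backed specification: process
 * the pattern and actions, then elaborate the filter template into a
 * set of match flags supported by the hardware.
 */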
static int
sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow *flow,
			     struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
	ctx.filter = &spec_filter->template;

	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
				    pattern, &ctx, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}
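
/*
 * Parse an rte_flow rule into an MAE-backed specification, handling
 * tunnel offload JUMP rules and flow tunnel reference counting.
 */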
static int
sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_flow *flow,
			  struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	int rc;

	/*
	 * If the flow is meant to be a JUMP rule in tunnel offload,
	 * preparse its actions and save its properties in spec_mae.
	 */
	rc = sfc_flow_tunnel_detect_jump_rule(sa, actions, spec_mae, error);
	if (rc != 0)
		goto fail;

	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
	if (rc != 0)
		goto fail;

	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
		/*
		 * By design, this flow should be represented solely by the
		 * outer rule. But the HW/FW hasn't got support for setting
		 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
		 * does it support outer rule counters. As a workaround, an
		 * action rule of lower priority is used to do the job.
		 *
		 * So don't skip sfc_mae_rule_parse_actions() below.
		 */
	}

	rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
	if (rc != 0)
		goto fail;

	if (spec_mae->ft != NULL) {
		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
			spec_mae->ft->jump_rule_is_set = B_TRUE;

		++(spec_mae->ft->refcnt);
	}

	return 0;

fail:
	/* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
	spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
	spec_mae->ft = NULL;

	return rc;
}
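
/*
 * Top-level parse entry point: validate the attributes, then dispatch
 * to the backend-specific parser selected by the specification type.
 */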
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	rc = sfc_flow_parse_attr(sa, attr, flow, error);
	if (rc != 0)
		return rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->parse == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	return ops->parse(dev, pattern, actions, flow, error);
}

static struct rte_flow *
sfc_flow_zmalloc(struct rte_flow_error *error)
{
	struct rte_flow *flow;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
	}

	return flow;
}

static void
sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
{
	const struct sfc_flow_ops_by_spec *ops;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops != NULL && ops->cleanup != NULL)
		ops->cleanup(sa, flow);

	rte_free(flow);
}
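
/*
 * Backend dispatch helpers: route insert/remove requests to the filter
 * or MAE implementation chosen for the flow specification.
 */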
static int
sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->insert == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return rte_errno;
	}

	rc = ops->insert(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to insert the flow rule");
	}

	return rc;
}

static int
sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->remove == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return rte_errno;
	}

	rc = ops->remove(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to remove the flow rule");
	}

	return rc;
}
|
|
|
|
|
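/*
 * Optionally let the backend verify the parsed flow against the FW
 * without inserting it; used by the validate callback below.
 */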
static int
sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc = 0;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	if (ops->verify != NULL) {
		SFC_ASSERT(sfc_adapter_is_locked(sa));
		rc = ops->verify(sa, flow);
	}

	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to verify flow validity with FW");
		return -rte_errno;
	}

	return 0;
}

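/*
 * rte_flow .validate callback: run the full parse (and FW verify,
 * where available) on a temporary flow handle which is freed before
 * return.
 */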
static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		return -rte_errno;

	sfc_adapter_lock(sa);

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc == 0)
		rc = sfc_flow_verify(sa, flow, error);

	sfc_flow_free(sa, flow);

	sfc_adapter_unlock(sa);

	return rc;
}

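/*
 * rte_flow .create callback: parse the flow, link it to the adapter
 * flow list and, if the port is already started, insert it into the
 * HW right away; otherwise insertion is deferred to sfc_flow_start().
 */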
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow = NULL;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		goto fail_no_mem;

	sfc_adapter_lock(sa);

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);

	if (sa->state == SFC_ETHDEV_STARTED) {
		rc = sfc_flow_insert(sa, flow, error);
		if (rc != 0)
			goto fail_flow_insert;
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_flow_insert:
	TAILQ_REMOVE(&sa->flow_list, flow, entries);

fail_bad_value:
	sfc_flow_free(sa, flow);
	sfc_adapter_unlock(sa);

fail_no_mem:
	return NULL;
}

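/*
 * rte_flow .destroy callback: the handle must be on the adapter flow
 * list; the rule is removed from the HW only if the port is started.
 */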
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	if (sa->state == SFC_ETHDEV_STARTED)
		rc = sfc_flow_remove(sa, flow, error);

	TAILQ_REMOVE(&sa->flow_list, flow, entries);
	sfc_flow_free(sa, flow);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

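/*
 * rte_flow .flush callback: remove and free all flows; a removal
 * failure does not stop the flush, and the last error code is the one
 * reported.
 */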
static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		if (sa->state == SFC_ETHDEV_STARTED) {
			int rc;

			rc = sfc_flow_remove(sa, flow, error);
			if (rc != 0)
				ret = rc;
		}

		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

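/*
 * rte_flow .query callback: requires a backend query method and a
 * started adapter.
 */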
static int
sfc_flow_query(struct rte_eth_dev *dev,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const struct sfc_flow_ops_by_spec *ops;
	int ret;

	sfc_adapter_lock(sa);

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->query == NULL) {
		ret = rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"No backend to handle this flow");
		goto fail_no_backend;
	}

	if (sa->state != SFC_ETHDEV_STARTED) {
		ret = rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Can't query the flow: the adapter is not started");
		goto fail_not_started;
	}

	ret = ops->query(dev, flow, action, data, error);
	if (ret != 0)
		goto fail_query;

	sfc_adapter_unlock(sa);

	return 0;

fail_query:
fail_not_started:
fail_no_backend:
	sfc_adapter_unlock(sa);
	return ret;
}

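/*
 * rte_flow .isolate callback: isolated mode can only be changed while
 * the port is in the SFC_ETHDEV_INITIALIZED state (i.e. not yet
 * configured or started).
 */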
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ETHDEV_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

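/*
 * rte_flow .pick_transfer_proxy callback: transfer flows must be
 * managed through the switch domain admin port reported here.
 */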
static int
sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
			     uint16_t *transfer_proxy_port,
			     struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int ret;

	ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
					      transfer_proxy_port);
	if (ret != 0) {
		return rte_flow_error_set(error, ret,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, NULL);
	}

	return 0;
}

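/*
 * Flow API entry points exposed to the generic ethdev layer; both the
 * backend-agnostic callbacks above and the tunnel offload callbacks
 * are wired up here.
 */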
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = sfc_flow_query,
	.isolate = sfc_flow_isolate,
	.tunnel_decap_set = sfc_flow_tunnel_decap_set,
	.tunnel_match = sfc_flow_tunnel_match,
	.tunnel_action_decap_release = sfc_flow_tunnel_action_decap_release,
	.tunnel_item_release = sfc_flow_tunnel_item_release,
	.get_restore_info = sfc_flow_tunnel_get_restore_info,
	.pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
};

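/* Initialise the adapter flow list; the adapter lock must be held. */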
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->flow_list);
}

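/* Free all flow handles still on the list at adapter detach. */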
void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}
}

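/*
 * On port stop, withdraw all rules from the HW but keep the handles
 * listed so that sfc_flow_start() can reinsert them; also drop the
 * dummy RSS context if one was set up, and stop the MAE counter
 * service.
 */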
void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->flow_list, entries)
		sfc_flow_remove(sa, flow, NULL);

	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	}

	/*
	 * MAE counter service is not stopped on flow rule remove to avoid
	 * extra work. Make sure that it is stopped here.
	 */
	sfc_mae_counter_stop(sa);
}

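/* On port start, reinsert all remembered flow rules into the HW. */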
int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_flow_tunnel_reset_hit_counters(sa);

	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
		rc = sfc_flow_insert(sa, flow, NULL);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}