ethdev: increase flow type limit from 32 to 64

Increase the internal limit for flow types from 32 to 64
to support future flow type extensions.

Change the type of the following variables from uint32_t[] to uint64_t[]:
rte_eth_fdir_info.flow_types_mask
rte_eth_hash_global_conf.sym_hash_enable_mask
rte_eth_hash_global_conf.valid_bit_mask

This modification affects the following components:
net/i40e
net/ixgbe
app/testpmd

ABI versioning is used to keep ABI stability.

Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
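For orientation before the diff, here is a minimal self-contained sketch (not part of the commit) of the word/bit arithmetic the widened masks rely on; UINT64_BIT mirrors the new ethdev macro and the flow type value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define UINT64_BIT (8 * sizeof(uint64_t))	/* mirrors the new ethdev macro */

int main(void)
{
	uint64_t flow_types_mask[1] = { 0 };	/* one word covers 64 flow types */
	unsigned int flow_type = 33;		/* unreachable with a 32-bit mask */

	/* the same idx/offset split testpmd performs below */
	unsigned int idx = flow_type / UINT64_BIT;
	unsigned int offset = flow_type % UINT64_BIT;

	flow_types_mask[idx] |= 1ULL << offset;	/* 1ULL keeps the shift 64-bit wide */
	printf("flow type %u -> word %u, bit %u, set: %d\n",
	       flow_type, idx, offset,
	       !!(flow_types_mask[idx] & (1ULL << offset)));
	return 0;
}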

app/test-pmd/cmdline.c

@@ -10899,7 +10899,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
 	struct rte_eth_fdir_info fdir_info;
 	struct rte_eth_fdir_flex_mask flex_mask;
 	struct rte_port *port;
-	uint32_t flow_type_mask;
+	uint64_t flow_type_mask;
 	uint16_t i;
 	int ret;
@@ -10952,7 +10952,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
 		return;
 	}
 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
-		if (flow_type_mask & (1 << i)) {
+		if (flow_type_mask & (1ULL << i)) {
 			flex_mask.flow_type = i;
 			fdir_set_flex_mask(res->port_id, &flex_mask);
 		}
@@ -10961,7 +10961,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
 		return;
 	}
 	flex_mask.flow_type = str2flowtype(res->flow_type);
-	if (!(flow_type_mask & (1 << flex_mask.flow_type))) {
+	if (!(flow_type_mask & (1ULL << flex_mask.flow_type))) {
 		printf("Flow type %s not supported on port %d\n",
 			res->flow_type, res->port_id);
 		return;
@@ -11323,10 +11323,10 @@ cmd_get_hash_global_config_parsed(void *parsed_result,
 	}
 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
-		idx = i / UINT32_BIT;
-		offset = i % UINT32_BIT;
+		idx = i / UINT64_BIT;
+		offset = i % UINT64_BIT;
 		if (!(info.info.global_conf.valid_bit_mask[idx] &
-					(1UL << offset)))
+					(1ULL << offset)))
 			continue;
 		str = flowtype_to_str(i);
 		if (!str)
@@ -11334,7 +11334,7 @@ cmd_get_hash_global_config_parsed(void *parsed_result,
 		printf("Symmetric hash is %s globally for flow type %s "
 			"by port %d\n",
 			((info.info.global_conf.sym_hash_enable_mask[idx] &
-			(1UL << offset)) ? "enabled" : "disabled"), str,
+			(1ULL << offset)) ? "enabled" : "disabled"), str,
 			res->port_id);
 	}
 }
@@ -11395,12 +11395,12 @@ cmd_set_hash_global_config_parsed(void *parsed_result,
 		RTE_ETH_HASH_FUNCTION_DEFAULT;
 	ftype = str2flowtype(res->flow_type);
-	idx = ftype / (CHAR_BIT * sizeof(uint32_t));
-	offset = ftype % (CHAR_BIT * sizeof(uint32_t));
-	info.info.global_conf.valid_bit_mask[idx] |= (1UL << offset);
+	idx = ftype / UINT64_BIT;
+	offset = ftype % UINT64_BIT;
+	info.info.global_conf.valid_bit_mask[idx] |= (1ULL << offset);
 	if (!strcmp(res->enable, "enable"))
 		info.info.global_conf.sym_hash_enable_mask[idx] |=
-			(1UL << offset);
+			(1ULL << offset);
 	ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
 				RTE_ETH_FILTER_SET, &info);
 	if (ret < 0)
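Note why the literals change along with the types: a standalone sketch (not testpmd code) of the widths involved. The untyped constant 1 is an int, so 1 << i is undefined once i reaches bit 31, and 1UL is still only 32 bits wide on ILP32 targets; 1ULL is the only spelling that safely reaches all 64 flow type bits.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int i = 33;	/* a flow type index beyond bit 31 */

	/* the width of the literal decides how far a shift may legally go */
	printf("sizeof(1)    = %zu\n", sizeof(1));	/* int */
	printf("sizeof(1UL)  = %zu\n", sizeof(1UL));	/* 4 on ILP32 targets */
	printf("sizeof(1ULL) = %zu\n", sizeof(1ULL));	/* always 8 */

	uint64_t bit = 1ULL << i;	/* well defined for 0 <= i <= 63 */
	printf("1ULL << %u = 0x%016" PRIx64 "\n", i, bit);
	return 0;
}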

drivers/net/i40e/i40e_ethdev.c

@@ -8137,14 +8137,17 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
 		(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
 	/*
-	 * We work only with lowest 32 bits which is not correct, but to work
-	 * properly the valid_bit_mask size should be increased up to 64 bits
-	 * and this will brake ABI. This modification will be done in next
-	 * release
+	 * As i40e supports less than 64 flow types, only first 64 bits need to
+	 * be checked.
 	 */
-	g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
+	for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+		g_cfg->valid_bit_mask[i] = 0ULL;
+		g_cfg->sym_hash_enable_mask[i] = 0ULL;
+	}
-	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
+	g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
+	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
 		if (!adapter->pctypes_tbl[i])
 			continue;
 		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
@@ -8153,7 +8156,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
 			reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
 			if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
 				g_cfg->sym_hash_enable_mask[0] |=
-						(1UL << i);
+						(1ULL << i);
 			}
 		}
 	}
@@ -8167,7 +8170,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
 			      const struct rte_eth_hash_global_conf *g_cfg)
 {
 	uint32_t i;
-	uint32_t mask0, i40e_mask = adapter->flow_types_mask;
+	uint64_t mask0, i40e_mask = adapter->flow_types_mask;
 
 	if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
 	    g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
@@ -8178,7 +8181,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
 	}
 	/*
-	 * As i40e supports less than 32 flow types, only first 32 bits need to
+	 * As i40e supports less than 64 flow types, only first 64 bits need to
 	 * be checked.
 	 */
 	mask0 = g_cfg->valid_bit_mask[0];
@@ -8214,23 +8217,20 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
 	int ret;
 	uint16_t i, j;
 	uint32_t reg;
-	/*
-	 * We work only with lowest 32 bits which is not correct, but to work
-	 * properly the valid_bit_mask size should be increased up to 64 bits
-	 * and this will brake ABI. This modification will be done in next
-	 * release
-	 */
-	uint32_t mask0 = g_cfg->valid_bit_mask[0] &
-					(uint32_t)adapter->flow_types_mask;
+	uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
 
 	/* Check the input parameters */
 	ret = i40e_hash_global_config_check(adapter, g_cfg);
 	if (ret < 0)
 		return ret;
 
-	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
+	/*
+	 * As i40e supports less than 64 flow types, only first 64 bits need to
+	 * be configured.
+	 */
+	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
 		if (mask0 & (1UL << i)) {
-			reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+			reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
 				I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
 			for (j = I40E_FILTER_PCTYPE_INVALID + 1;
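The rest of i40e_hash_global_config_check is trimmed from this view; presumably it rejects any requested valid bit the adapter does not support, which over the widened masks is a plain subset test. A hypothetical distillation (names illustrative; the driver would return -EINVAL where this returns -1):

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper: is every requested flow type bit supported? */
static int
flow_mask_is_supported(uint64_t requested, uint64_t supported)
{
	/* any requested bit outside 'supported' invalidates the config */
	return (requested & ~supported) ? -1 : 0;
}

int main(void)
{
	uint64_t supported = (1ULL << 2) | (1ULL << 4) | (1ULL << 33);

	printf("%d\n", flow_mask_is_supported(1ULL << 4, supported));	/* 0 */
	printf("%d\n", flow_mask_is_supported(1ULL << 5, supported));	/* -1 */
	return 0;
}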

drivers/net/i40e/i40e_fdir.c

@@ -66,17 +66,17 @@
 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
 
 #define I40E_FDIR_FLOWS ( \
-	(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-	(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
-	(1 << RTE_ETH_FLOW_L2_PAYLOAD))
+	(1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
 
 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
@@ -1999,6 +1999,7 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	uint16_t num_flex_set = 0;
 	uint16_t num_flex_mask = 0;
+	uint16_t i;
 
 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
 		fdir->mode = RTE_FDIR_MODE_PERFECT;
@@ -2011,6 +2012,8 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 			(uint32_t)hw->func_caps.fd_filters_best_effort;
 	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
 	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
+	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+		fdir->flow_types_mask[i] = 0ULL;
 	fdir->flex_payload_unit = sizeof(uint16_t);
 	fdir->flex_bitmask_unit = sizeof(uint16_t);
 	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;

drivers/net/ixgbe/ixgbe_fdir.c

@@ -41,14 +41,14 @@
 #define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
 
 #define IXGBE_FDIR_FLOW_TYPES ( \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
 
 #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
 	uint8_t ipv6_addr[16]; \
@@ -1407,7 +1407,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_fdir_info *info =
 			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	uint32_t fdirctrl, max_num;
+	uint32_t fdirctrl, max_num, i;
 	uint8_t offset;
 
 	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
@@ -1439,9 +1439,11 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
 	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
-		fdir_info->flow_types_mask[0] = 0;
+		fdir_info->flow_types_mask[0] = 0ULL;
 	else
 		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+		fdir_info->flow_types_mask[i] = 0ULL;
 
 	fdir_info->flex_payload_unit = sizeof(uint16_t);
 	fdir_info->max_flex_payload_segment_num = 1;
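Both FDIR info paths now follow the same contract: word 0 carries the capability bits and every higher word of flow_types_mask is cleared, so callers can scan the whole array without knowing which driver filled it. A hypothetical application-side consumer under that contract:

#include <stdint.h>
#include <stdio.h>

#define UINT64_BIT 64
#define FLOW_MASK_ARRAY_SIZE 1	/* matches the macro while RTE_ETH_FLOW_MAX <= 64 */

/* stand-in for scanning the flow_types_mask field of struct rte_eth_fdir_info */
static void
print_supported_flow_types(const uint64_t mask[FLOW_MASK_ARRAY_SIZE])
{
	unsigned int i;

	for (i = 0; i < FLOW_MASK_ARRAY_SIZE * UINT64_BIT; i++)
		if (mask[i / UINT64_BIT] & (1ULL << (i % UINT64_BIT)))
			printf("flow type %u is FDIR-capable\n", i);
}

int main(void)
{
	/* e.g. a driver that reported flow types 2 and 4 in word 0 */
	uint64_t mask[FLOW_MASK_ARRAY_SIZE] = { (1ULL << 2) | (1ULL << 4) };

	print_supported_flow_types(mask);
	return 0;
}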

lib/librte_ether/rte_ethdev.h

@@ -662,9 +662,9 @@ enum rte_fdir_mode {
 	RTE_FDIR_MODE_PERFECT_TUNNEL, /**< Enable FDIR filter mode - tunnel. */
 };
 
-#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
+#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
 #define RTE_FLOW_MASK_ARRAY_SIZE \
-	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
 
 /**
  * A structure used to get the information of flow director filter.
@@ -681,7 +681,7 @@ struct rte_eth_fdir_info {
 	uint32_t guarant_spc;          /**< Guaranteed spaces.*/
 	uint32_t best_spc;             /**< Best effort spaces.*/
 	/** Bit mask for every supported flow type. */
-	uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
+	uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
 	uint32_t max_flexpayload;      /**< Total flex payload in bytes. */
 	/** Flexible payload unit in bytes. Size and alignments of all flex
 	    payload segments should be multiplies of this value. */
@@ -774,7 +774,7 @@ enum rte_eth_hash_function {
 };
 
 #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
-	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
 /**
  * A structure used to set or get global hash function configurations which
  * include symmetric hash enable per flow type and hash function type.
@@ -787,9 +787,9 @@ enum rte_eth_hash_function {
 struct rte_eth_hash_global_conf {
 	enum rte_eth_hash_function hash_func; /**< Hash function type */
 	/** Bit mask for symmetric hash enable per flow type */
-	uint32_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+	uint64_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
 	/** Bit mask indicates if the corresponding bit is valid */
-	uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+	uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
 };
 
 /**
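One consequence of the header change is easy to miss: with UINT64_BIT equal to 64 and RTE_ETH_FLOW_MAX still well below 64, both array-size macros evaluate to 1, so the drivers above that fill index 0 and zero the rest cover the entire array. A compile-time sketch with local stand-ins (an explicit round-up in place of RTE_ALIGN, and an illustrative flow count):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))	/* 64 */
#define ALIGN_UP(v, a) ((((v) + (a) - 1) / (a)) * (a))	/* stand-in for RTE_ALIGN */
#define FLOW_MAX 24	/* illustrative; any value <= 64 behaves the same */
#define FLOW_MASK_ARRAY_SIZE (ALIGN_UP(FLOW_MAX, UINT64_BIT) / UINT64_BIT)

/* a single uint64_t word suffices until the flow type space outgrows 64 */
static_assert(FLOW_MASK_ARRAY_SIZE == 1, "mask fits in one word");

int main(void) { return 0; }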

lib/librte_ether/rte_ethdev.c

@@ -34,6 +34,7 @@
 #include <rte_errno.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
+#include <rte_compat.h>
 
 #include "rte_ether.h"
 #include "rte_ethdev.h"
@@ -3148,8 +3149,153 @@ rte_eth_dev_filter_supported(uint16_t port_id,
 }
 
 int
-rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
-		       enum rte_filter_op filter_op, void *arg)
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+			    enum rte_filter_type filter_type,
+			    enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+			    enum rte_filter_type filter_type,
+			    enum rte_filter_op filter_op, void *arg)
+{
+	struct rte_eth_fdir_info_v22 {
+		enum rte_fdir_mode mode;
+		struct rte_eth_fdir_masks mask;
+		struct rte_eth_fdir_flex_conf flex_conf;
+		uint32_t guarant_spc;
+		uint32_t best_spc;
+		uint32_t flow_types_mask[1];
+		uint32_t max_flexpayload;
+		uint32_t flex_payload_unit;
+		uint32_t max_flex_payload_segment_num;
+		uint16_t flex_payload_limit;
+		uint32_t flex_bitmask_unit;
+		uint32_t max_flex_bitmask_num;
+	};
+
+	struct rte_eth_hash_global_conf_v22 {
+		enum rte_eth_hash_function hash_func;
+		uint32_t sym_hash_enable_mask[1];
+		uint32_t valid_bit_mask[1];
+	};
+
+	struct rte_eth_hash_filter_info_v22 {
+		enum rte_eth_hash_filter_info_type info_type;
+		union {
+			uint8_t enable;
+			struct rte_eth_hash_global_conf_v22 global_conf;
+			struct rte_eth_input_set_conf input_set_conf;
+		} info;
+	};
+
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	dev = &rte_eth_devices[port_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+
+	if (filter_op == RTE_ETH_FILTER_INFO) {
+		int retval;
+		struct rte_eth_fdir_info_v22 *fdir_info_v22;
+		struct rte_eth_fdir_info fdir_info;
+
+		fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
+		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			  filter_op, (void *)&fdir_info);
+		fdir_info_v22->mode = fdir_info.mode;
+		fdir_info_v22->mask = fdir_info.mask;
+		fdir_info_v22->flex_conf = fdir_info.flex_conf;
+		fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
+		fdir_info_v22->best_spc = fdir_info.best_spc;
+		fdir_info_v22->flow_types_mask[0] =
+			(uint32_t)fdir_info.flow_types_mask[0];
+		fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
+		fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
+		fdir_info_v22->max_flex_payload_segment_num =
+			fdir_info.max_flex_payload_segment_num;
+		fdir_info_v22->flex_payload_limit =
+			fdir_info.flex_payload_limit;
+		fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
+		fdir_info_v22->max_flex_bitmask_num =
+			fdir_info.max_flex_bitmask_num;
+		return retval;
+	} else if (filter_op == RTE_ETH_FILTER_GET) {
+		int retval;
+		struct rte_eth_hash_filter_info f_info;
+		struct rte_eth_hash_filter_info_v22 *f_info_v22 =
+			(struct rte_eth_hash_filter_info_v22 *)arg;
+
+		f_info.info_type = f_info_v22->info_type;
+		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			  filter_op, (void *)&f_info);
+
+		switch (f_info_v22->info_type) {
+		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+			f_info_v22->info.enable = f_info.info.enable;
+			break;
+		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+			f_info_v22->info.global_conf.hash_func =
+				f_info.info.global_conf.hash_func;
+			f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
+				(uint32_t)
+				f_info.info.global_conf.sym_hash_enable_mask[0];
+			f_info_v22->info.global_conf.valid_bit_mask[0] =
+				(uint32_t)
+				f_info.info.global_conf.valid_bit_mask[0];
+			break;
+		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+			f_info_v22->info.input_set_conf =
+				f_info.info.input_set_conf;
+			break;
+		default:
+			break;
+		}
+		return retval;
+	} else if (filter_op == RTE_ETH_FILTER_SET) {
+		struct rte_eth_hash_filter_info f_info;
+		struct rte_eth_hash_filter_info_v22 *f_v22 =
+			(struct rte_eth_hash_filter_info_v22 *)arg;
+
+		f_info.info_type = f_v22->info_type;
+
+		switch (f_v22->info_type) {
+		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+			f_info.info.enable = f_v22->info.enable;
+			break;
+		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+			f_info.info.global_conf.hash_func =
+				f_v22->info.global_conf.hash_func;
+			f_info.info.global_conf.sym_hash_enable_mask[0] =
+				(uint32_t)
+				f_v22->info.global_conf.sym_hash_enable_mask[0];
+			f_info.info.global_conf.valid_bit_mask[0] =
+				(uint32_t)
+				f_v22->info.global_conf.valid_bit_mask[0];
+			break;
+		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+			f_info.info.input_set_conf =
+				f_v22->info.input_set_conf;
+			break;
+		default:
+			break;
+		}
+		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
+						    (void *)&f_info);
+	} else
+		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
+						    arg);
+}
+VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+			      enum rte_filter_type filter_type,
+			      enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+			      enum rte_filter_type filter_type,
+			      enum rte_filter_op filter_op, void *arg)
 {
 	struct rte_eth_dev *dev;
@@ -3159,6 +3305,11 @@ rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
 	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
 }
+BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
+MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
+		  enum rte_filter_type filter_type,
+		  enum rte_filter_op filter_op, void *arg),
+		  rte_eth_dev_filter_ctrl_v1802);
 
 void *
 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
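The versioning macros above come from rte_compat.h; for shared-library builds they boil down to GNU .symver directives tied to the nodes declared in the version script (next file). A minimal standalone sketch of the same mechanism for a hypothetical libfoo, not DPDK code; build with something like gcc -shared -fPIC foo.c -Wl,--version-script=foo.map:

/* foo.map — two version nodes, V2 inheriting V1:
 *   V1 { global: foo; local: *; };
 *   V2 { global: foo; } V1;
 */

/* foo.c — one exported name, two ABIs */
int foo_v1(void);
int foo_v1(void) { return 1; }	/* old ABI, kept for already-linked binaries */

int foo_v2(void);
int foo_v2(void) { return 2; }	/* new ABI */

__asm__(".symver foo_v1, foo@V1");	/* foo@V1: compat version, like VERSION_SYMBOL */
__asm__(".symver foo_v2, foo@@V2");	/* foo@@V2: default, like BIND_DEFAULT_SYMBOL */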

lib/librte_ether/rte_ethdev_version.map

@@ -198,6 +198,13 @@ DPDK_17.11 {
 
 } DPDK_17.11;
 
+DPDK_18.02 {
+	global:
+
+	rte_eth_dev_filter_ctrl;
+
+} DPDK_17.11;
+
 EXPERIMENTAL {
 	global: