net/i40e: support hash configuration in RSS flow

This patch supports:

- Symmetric hash configuration
- Hash input set configuration

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
Acked-by: Beilei Xing <beilei.xing@intel.com>
Chenxu Di 2020-04-15 08:46:09 +00:00 committed by Ferruh Yigit
parent 99d8ba79ef
commit feaae285b3
5 changed files with 711 additions and 103 deletions


@ -44,6 +44,7 @@ Features of the i40e PMD are:
- Queue region configuration
- Virtual Function Port Representors
- Malicious Device Driver event catch and notify
- Generic flow API
Prerequisites
-------------
@ -569,6 +570,42 @@ details please refer to :doc:`../testpmd_app_ug/index`.
testpmd> set port (port_id) queue-region flush (on|off)
testpmd> show port (port_id) queue-region

Generic flow API
~~~~~~~~~~~~~~~~

- ``RSS Flow``

  RSS Flow supports setting the hash input set and hash function, enabling
  hashing, and configuring the queue region.
  For example:
  Configure the queue region as queues 0, 1, 2, 3.

  .. code-block:: console

    testpmd> flow create 0 ingress pattern end actions rss types end \
      queues 0 1 2 3 end / end

  Enable hashing and set the input set for ipv4-tcp.

  .. code-block:: console

    testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \
      actions rss types ipv4-tcp l3-src-only end queues end / end

  Enable symmetric hashing for flow type ipv4-tcp.

  .. code-block:: console

    testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \
      actions rss types ipv4-tcp end queues end func symmetric_toeplitz / end

  Set the hash function to simple XOR.

  .. code-block:: console

    testpmd> flow create 0 ingress pattern end actions rss types end \
      queues end func simple_xor / end
Limitations or Known issues
---------------------------
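As a usage note (an illustration based on standard testpmd flow syntax, not part of this patch), rules created with the commands above can be removed with:
testpmd> flow destroy 0 rule 0
testpmd> flow flush 0
Destroying or flushing such a rule goes through the delete path added below, which restores the default input set, hash function or redirection table for the affected configuration.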


@ -96,6 +96,8 @@ New Features
Updated i40e PMD with new features and improvements, including:
* Enable MAC address as FDIR input set for ipv4-other, ipv4-udp and ipv4-tcp.
* Added support for RSS using L3/L4 source/destination only.
* Added support for setting the hash function in rte_flow.
* **Updated the Intel iavf driver.**


@ -1657,6 +1657,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
/* initialize RSS rule list */
TAILQ_INIT(&pf->rss_config_list);
/* initialize Traffic Manager configuration */
i40e_tm_conf_init(dev);
@ -1676,7 +1679,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
/* initialize RSS configuration from rte_flow */
memset(&pf->rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
@ -12329,14 +12332,16 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
/* Restore RSS filter */
static inline void
i40e_rss_filter_restore(struct i40e_pf *pf)
{
struct i40e_rss_conf_list *list = &pf->rss_config_list;
struct i40e_rss_filter *filter;
TAILQ_FOREACH(filter, list, next) {
i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
}
}
static void
@ -12946,45 +12951,300 @@ i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
return 0;
}
/* Write HENA register to enable hash */
static int
i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
uint64_t hena;
int ret;
ret = i40e_set_rss_key(pf->main_vsi, key,
rss_conf->conf.key_len);
if (ret)
return ret;
hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
I40E_WRITE_FLUSH(hw);
return 0;
}
/* Configure hash input set */
static int
i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct rte_eth_input_set_conf conf;
uint64_t mask0;
int ret = 0;
uint32_t j;
int i;
static const struct {
uint64_t type;
enum rte_eth_input_set_field field;
} inset_match_table[] = {
{ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP4},
{ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP4},
{ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
{ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP4},
{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP4},
{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP4},
{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP4},
{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP4},
{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP4},
{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP4},
{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP4},
{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
{ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP6},
{ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP6},
{ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
{ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP6},
{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP6},
{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP6},
{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP6},
{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP6},
{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP6},
{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
RTE_ETH_INPUT_SET_L3_SRC_IP6},
{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
RTE_ETH_INPUT_SET_L3_DST_IP6},
{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
RTE_ETH_INPUT_SET_UNKNOWN},
};
mask0 = types & pf->adapter->flow_types_mask;
conf.op = RTE_ETH_INPUT_SET_SELECT;
conf.inset_size = 0;
for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
if (mask0 & (1ULL << i)) {
conf.flow_type = i;
break;
}
}
for (j = 0; j < RTE_DIM(inset_match_table); j++) {
if ((types & inset_match_table[j].type) ==
inset_match_table[j].type) {
if (inset_match_table[j].field ==
RTE_ETH_INPUT_SET_UNKNOWN)
return -EINVAL;
conf.field[conf.inset_size] =
inset_match_table[j].field;
conf.inset_size++;
}
}
if (conf.inset_size) {
ret = i40e_hash_filter_inset_select(hw, &conf);
if (ret)
return ret;
}
return ret;
}
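The RTE_ETH_INPUT_SET_UNKNOWN entries in the table above mark combinations with no corresponding hardware input-set field, so such requests are rejected with -EINVAL. An illustrative testpmd command (reusing the syntax from the documentation above) that would fail for this reason, since ipv4-other carries no L4 ports:
testpmd> flow create 0 ingress pattern eth / ipv4 / end \
actions rss types ipv4-other l4-src-only end queues end / end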
/* Look up the conflicted rule then mark it as invalid */
static void
i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf)
{
struct i40e_rss_filter *rss_item;
uint64_t rss_inset;
/* Clear input set bits before comparing the pctype */
rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
/* Look up the conflicted rule then mark it as invalid */
TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
if (!rss_item->rss_filter_info.valid)
continue;
if (conf->conf.queue_num &&
rss_item->rss_filter_info.conf.queue_num)
rss_item->rss_filter_info.valid = false;
if (conf->conf.types &&
(rss_item->rss_filter_info.conf.types &
rss_inset) ==
(conf->conf.types & rss_inset))
rss_item->rss_filter_info.valid = false;
if (conf->conf.func ==
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
rss_item->rss_filter_info.conf.func ==
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
rss_item->rss_filter_info.valid = false;
}
}
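The effect is that the newest overlapping rule wins: creating a second hash rule for the same pctype marks the earlier one invalid, so only the latest input set stays in effect and a later destroy of the first rule becomes a no-op. An illustration, assuming the testpmd syntax from the documentation above:
testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end \
actions rss types ipv4-udp l3-src-only end queues end / end
testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end \
actions rss types ipv4-udp l3-dst-only end queues end / end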
/* Configure RSS hash function */
static int
i40e_rss_config_hash_function(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t reg, i;
uint64_t mask0;
uint16_t j;
if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
I40E_WRITE_FLUSH(hw);
i40e_rss_mark_invalid_rule(pf, conf);
return 0;
}
reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
I40E_WRITE_FLUSH(hw);
i40e_rss_mark_invalid_rule(pf, conf);
} else if (conf->conf.func ==
RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
mask0 = conf->conf.types & pf->adapter->flow_types_mask;
i40e_set_symmetric_hash_enable_per_port(hw, 1);
for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
if (mask0 & (1UL << i))
break;
}
for (j = I40E_FILTER_PCTYPE_INVALID + 1;
j < I40E_FILTER_PCTYPE_MAX; j++) {
if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
i40e_write_global_rx_ctl(hw,
I40E_GLQF_HSYM(j),
I40E_GLQF_HSYM_SYMH_ENA_MASK);
}
}
return 0;
}
/* Enable RSS according to the configuration */
static int
i40e_rss_enable_hash(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf)
{
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
struct i40e_rte_flow_rss_conf rss_conf;
if (!(conf->conf.types & pf->adapter->flow_types_mask))
return -ENOTSUP;
memset(&rss_conf, 0, sizeof(rss_conf));
rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
/* Configure hash input set */
if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
return -EINVAL;
if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
/* Random default keys */
static uint32_t rss_key_default[] = {0x6b793944,
0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
rss_conf.conf.key = (uint8_t *)rss_key_default;
rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
PMD_DRV_LOG(INFO,
"No valid RSS key config for i40e, using default\n");
}
rss_conf.conf.types |= rss_info->conf.types;
i40e_rss_hash_set(pf, &rss_conf);
if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
i40e_rss_config_hash_function(pf, conf);
i40e_rss_mark_invalid_rule(pf, conf);
return 0;
}
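Note that the requested types are OR-ed with the already enabled ones before HENA is rewritten, so hash-enable rules accumulate. A sketch of the resulting behaviour, assuming the testpmd syntax from the documentation above:
testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end \
actions rss types ipv4-udp end queues end / end
testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \
actions rss types ipv4-tcp end queues end / end
After the second rule, hashing remains enabled for both ipv4-udp and ipv4-tcp.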
/* Configure RSS queue region */
static int
i40e_rss_config_queue_region(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t lut = 0;
uint16_t j, num;
uint32_t i;
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
* It's necessary to calculate the actual PF queues that are configured.
*/
@ -13014,29 +13274,195 @@ i40e_config_rss_filter(struct i40e_pf *pf,
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
}
i40e_rss_mark_invalid_rule(pf, conf);
return 0;
}
/* Configure RSS hash function to default */
static int
i40e_rss_clear_hash_function(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t i, reg;
uint64_t mask0;
uint16_t j;
if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
PMD_DRV_LOG(DEBUG,
"Hash function already set to Toeplitz");
I40E_WRITE_FLUSH(hw);
return 0;
}
reg |= I40E_GLQF_CTL_HTOEP_MASK;
i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
I40E_WRITE_FLUSH(hw);
} else if (conf->conf.func ==
RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
mask0 = conf->conf.types & pf->adapter->flow_types_mask;
for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
if (mask0 & (1UL << i))
break;
}
for (j = I40E_FILTER_PCTYPE_INVALID + 1;
j < I40E_FILTER_PCTYPE_MAX; j++) {
if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
i40e_write_global_rx_ctl(hw,
I40E_GLQF_HSYM(j),
0);
}
}
return 0;
}
/* Disable RSS hash and configure default input set */
static int
i40e_rss_disable_hash(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf)
{
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_rte_flow_rss_conf rss_conf;
uint32_t i;
memset(&rss_conf, 0, sizeof(rss_conf));
rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
/* Disable RSS hash */
rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
i40e_rss_hash_set(pf, &rss_conf);
for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
!(conf->conf.types & (1ULL << i)))
continue;
/* Configure default input set */
struct rte_eth_input_set_conf input_conf = {
.op = RTE_ETH_INPUT_SET_SELECT,
.flow_type = i,
.inset_size = 1,
};
input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
i40e_hash_filter_inset_select(hw, &input_conf);
}
rss_info->conf.types = rss_conf.conf.types;
i40e_rss_clear_hash_function(pf, conf);
return 0;
}
/* Configure RSS queue region to default */
static int
i40e_rss_clear_queue_region(struct i40e_pf *pf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
uint16_t queue[I40E_MAX_Q_PER_TC];
uint32_t num_rxq, i;
uint32_t lut = 0;
uint16_t j, num;
num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
for (j = 0; j < num_rxq; j++)
queue[j] = j;
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
* It's necessary to calculate the actual PF queues that are configured.
*/
if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
else
num = pf->dev_data->nb_rx_queues;
num = RTE_MIN(num, num_rxq);
PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
num);
if (num == 0) {
PMD_DRV_LOG(ERR,
"No PF queues are configured to enable RSS for port %u",
pf->dev_data->port_id);
return -ENOTSUP;
}
/* Fill in redirection table */
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
j = 0;
lut = (lut << 8) | (queue[j] & ((0x1 <<
hw->func_caps.rss_table_entry_width) - 1));
if ((i & 3) == 3)
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
}
rss_info->conf.queue_num = 0;
memset(&rss_info->conf.queue, 0, sizeof(uint16_t));
return 0;
}
int
i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add)
{
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
struct rte_flow_action_rss update_conf = rss_info->conf;
int ret = 0;
if (add) {
if (conf->conf.queue_num) {
/* Configure RSS queue region */
ret = i40e_rss_config_queue_region(pf, conf);
if (ret)
return ret;
update_conf.queue_num = conf->conf.queue_num;
update_conf.queue = conf->conf.queue;
} else if (conf->conf.func ==
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
/* Configure hash function */
ret = i40e_rss_config_hash_function(pf, conf);
if (ret)
return ret;
update_conf.func = conf->conf.func;
} else {
/* Configure hash enable and input set */
ret = i40e_rss_enable_hash(pf, conf);
if (ret)
return ret;
update_conf.types |= conf->conf.types;
update_conf.key = conf->conf.key;
update_conf.key_len = conf->conf.key_len;
}
/* Update RSS info in pf */
if (i40e_rss_conf_init(rss_info, &update_conf))
return -EINVAL;
} else {
if (!conf->valid)
return 0;
if (conf->conf.queue_num)
i40e_rss_clear_queue_region(pf);
else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
i40e_rss_clear_hash_function(pf, conf);
else
i40e_rss_disable_hash(pf, conf);
}
return 0;
}


@ -192,6 +192,9 @@ enum i40e_flxpld_layer_idx {
#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
#define I40E_RSS_TYPE_NONE 0ULL
#define I40E_RSS_TYPE_INVALID 1ULL
#define I40E_INSET_NONE 0x00000000000000000ULL
/* bit0 ~ bit 7 */
@ -754,6 +757,11 @@ struct i40e_queue_regions {
struct i40e_queue_region_info region[I40E_REGION_MAX_INDEX + 1];
};
struct i40e_rss_pattern_info {
uint8_t action_flag;
uint64_t types;
};
/* Tunnel filter number HW supports */
#define I40E_MAX_TUNNEL_FILTER_NUM 400
@ -973,6 +981,15 @@ struct i40e_rte_flow_rss_conf {
I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
bool valid; /* Check if it's valid */
};
TAILQ_HEAD(i40e_rss_conf_list, i40e_rss_filter);
/* RSS filter list structure */
struct i40e_rss_filter {
TAILQ_ENTRY(i40e_rss_filter) next;
struct i40e_rte_flow_rss_conf rss_filter_info;
};
struct i40e_vf_msg_cfg {
@ -1043,7 +1060,8 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_rte_flow_rss_conf rss_info; /* RSS info */
struct i40e_rss_conf_list rss_config_list; /* RSS rule list */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@ -1343,8 +1361,6 @@ int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
int i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add);
int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);


@ -4475,29 +4475,80 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
* function for RSS, or flowtype for queue region configuration.
* For example:
* pattern:
* Case 1: try to transform the pattern to a pctype. A valid pctype will be
* used in the parse action.
* Case 2: only ETH, indicate flowtype for queue region will be parsed.
* Case 3: only VLAN, indicate user_priority for queue region will be parsed.
* So, the pattern choice depends on the purpose of configuration of
* that flow.
* action:
* action RSS will be used to transmit valid parameters with
* struct rte_flow_action_rss for all the 3 cases.
*/
static int
i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_rss_pattern_info *p_info,
struct i40e_queue_regions *info)
{
const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item *item = pattern;
enum rte_flow_item_type item_type;
struct rte_flow_item *items;
uint32_t item_num = 0; /* non-void item number of pattern*/
uint32_t i = 0;
static const struct {
enum rte_flow_item_type *item_array;
uint64_t type;
} i40e_rss_pctype_patterns[] = {
{ pattern_fdir_ipv4,
ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
{ pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
{ pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
{ pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
{ pattern_fdir_ipv6,
ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
{ pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
{ pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
{ pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
};
p_info->types = I40E_RSS_TYPE_INVALID;
if (item->type == RTE_FLOW_ITEM_TYPE_END) {
p_info->types = I40E_RSS_TYPE_NONE;
return 0;
}
/* Convert pattern to RSS offload types */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
item_num++;
i++;
}
item_num++;
items = rte_zmalloc("i40e_pattern",
item_num * sizeof(struct rte_flow_item), 0);
if (!items) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL, "No memory for PMD internal items.");
return -ENOMEM;
}
i40e_pattern_skip_void_item(items, pattern);
for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
items)) {
p_info->types = i40e_rss_pctype_patterns[i].type;
rte_free(items);
return 0;
}
}
rte_free(items);
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
@ -4510,7 +4561,7 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
p_info->action_flag = 1;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
@ -4523,7 +4574,7 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_spec->tci) >> 13) & 0x7;
info->region[0].user_priority_num = 1;
info->queue_region_number = 1;
p_info->action_flag = 0;
}
}
break;
@ -4540,7 +4591,7 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
}
/**
* This function is used to parse RSS queue index, total queue number and
* hash functions, If the purpose of this configuration is for queue region
* configuration, it will set queue_region_conf flag to TRUE, else to FALSE.
* In queue region configuration, it also need to parse hardware flowtype
@ -4549,14 +4600,16 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
* be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
* hw_flowtype or PCTYPE max index should be 63, the user priority
* max index should be 7, and so on. And also, queue index should be
* continuous sequence and queue region index should be part of RSS
* queue index for this port.
* For hash params, the pctype in the action and the pattern must be the same.
* Setting a queue index requires that no RSS types are given.
*/
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct i40e_rss_pattern_info p_info,
struct i40e_queue_regions *conf_info,
union i40e_filter_t *filter)
{
@ -4567,7 +4620,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
struct i40e_rte_flow_rss_conf *rss_config =
&filter->rss_conf;
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
uint16_t i, j, n, tmp, nb_types;
uint32_t index = 0;
uint64_t hf_bit = 1;
@ -4575,7 +4628,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
rss = act->conf;
/**
* RSS only supports forwarding,
* check if the first not void action is RSS.
*/
if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
@ -4586,7 +4639,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
if (p_info.action_flag) {
for (n = 0; n < 64; n++) {
if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
@ -4725,11 +4778,11 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (rss_config->queue_region_conf)
return 0;
if (!rss) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
"invalid rule");
return -rte_errno;
}
@ -4743,19 +4796,48 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
}
if (rss->queue_num && (p_info.types || rss->types))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS types must be empty while configuring queue region");
/* validate pattern and pctype */
if (!(rss->types & p_info.types) &&
(rss->types || p_info.types) && !rss->queue_num)
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
act, "invalid pctype");
nb_types = 0;
for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
if (rss->types & (hf_bit << n))
nb_types++;
if (nb_types > 1)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
act, "multi pctype is not supported");
}
if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
(p_info.types || rss->types || rss->queue_num))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"pattern, type and queues must be empty while"
" setting hash function as simple_xor");
if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
!(p_info.types && rss->types))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"pctype and queues can not be empty while"
" setting hash function as symmetric toeplitz");
/* Parse RSS related parameters from configuration */
if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS hash functions are not supported");
if (rss->level)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
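Taken together, these checks reject rules that combine a queue region with RSS types, request more than one pctype, or pass a pattern or types to simple_xor. Illustrative testpmd commands that would fail validation (assuming the syntax from the documentation above):
testpmd> flow create 0 ingress pattern end actions rss types ipv4-tcp end \
queues 0 1 2 3 end / end
testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \
actions rss types ipv4-tcp ipv4-udp end queues end / end
testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \
actions rss types ipv4-tcp end queues end func simple_xor / end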
@ -4797,19 +4879,20 @@ i40e_parse_rss_filter(struct rte_eth_dev *dev,
union i40e_filter_t *filter,
struct rte_flow_error *error)
{
struct i40e_rss_pattern_info p_info;
struct i40e_queue_regions info;
int ret;
memset(&info, 0, sizeof(struct i40e_queue_regions));
memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
ret = i40e_flow_parse_rss_pattern(dev, pattern,
error, &p_info, &info);
if (ret)
return ret;
ret = i40e_flow_parse_rss_action(dev, actions, error,
p_info, &info, filter);
if (ret)
return ret;
@ -4828,15 +4911,33 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev,
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_rss_filter *rss_filter;
int ret;
if (conf->queue_region_conf) {
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
} else {
ret = i40e_config_rss_filter(pf, conf, 1);
}
if (ret)
return ret;
rss_filter = rte_zmalloc("i40e_rss_filter",
sizeof(*rss_filter), 0);
if (rss_filter == NULL) {
PMD_DRV_LOG(ERR, "Failed to alloc memory.");
return -ENOMEM;
}
rss_filter->rss_filter_info = *conf;
/* The newly created rule is always valid.
* Any existing rule covered by the new rule will be set invalid.
*/
rss_filter->rss_filter_info.valid = true;
TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
return 0;
} }
static int
@ -4845,10 +4946,21 @@ i40e_config_rss_filter_del(struct rte_eth_dev *dev,
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_rss_filter *rss_filter;
void *temp;
if (conf->queue_region_conf)
i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
else
i40e_config_rss_filter(pf, conf, 0);
TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
if (!memcmp(&rss_filter->rss_filter_info, conf,
sizeof(struct rte_flow_action_rss))) {
TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
rte_free(rss_filter);
}
}
return 0;
}
@ -4991,7 +5103,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
&cons_filter.rss_conf);
if (ret)
goto free_flow;
flow->rule = TAILQ_LAST(&pf->rss_config_list,
i40e_rss_conf_list);
break;
default:
goto free_flow;
@ -5041,7 +5154,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
break;
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_del(dev,
&((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
@ -5189,7 +5302,7 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush RSS flows.");
return -rte_errno;
}
@ -5294,18 +5407,32 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
/* remove the RSS filter */
static int
i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_flow *flow;
void *temp;
int32_t ret = -EINVAL;
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
/* Delete RSS flows in flow list. */
TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
if (flow->filter_type != RTE_ETH_FILTER_HASH)
continue;
if (flow->rule) {
ret = i40e_config_rss_filter_del(dev,
&((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
if (ret)
return ret;
}
TAILQ_REMOVE(&pf->flow_list, flow, node);
rte_free(flow);
}
return ret;
}
} }