net/i40e: move RSS to flow API

Rte_flow was defined to include RSS, so this patch moves the existing
i40e RSS configuration over to rte_flow. The old RSS configuration API
is kept as it was and can be deprecated in the future.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
This commit is contained in:
Wei Zhao 2018-01-10 10:10:26 +08:00 committed by Ferruh Yigit
parent e38ea44f17
commit ecad87d223
4 changed files with 465 additions and 0 deletions

View File

@ -61,6 +61,13 @@ New Features
1, 2, 4, 8 or 16. If no such parameter is configured, the number of queues
per VF is 4 by default.
* **Added the i40e ethernet driver to support RSS with flow API.**
Rte_flow was actually defined to include RSS, but until now RSS was
handled outside of rte_flow. This patch adds support for configuring the
existing RSS feature of the i40e NIC using the rte_flow API. It also
enables queue region configuration using the flow API for i40e.
* **Added NVGRE and UDP tunnels support in Solarflare network PMD.**
Added support for NVGRE, VXLAN and GENEVE tunnels.

View File

@ -1313,6 +1313,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
/* initialize rss configuration from rte_flow */
memset(&pf->rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
return 0;
err_init_fdir_filter_list:
@ -11123,12 +11127,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
/* Replay the rte_flow RSS configuration cached in the PF, if any. */
static inline void
i40e_rss_filter_restore(struct i40e_pf *pf)
{
	struct i40e_rte_flow_rss_conf *rss = &pf->rss_info;

	/* A zero entry count means no rte_flow RSS rule is installed. */
	if (rss->num)
		i40e_config_rss_filter(pf, rss, TRUE);
}
/*
 * Restore all software-tracked filters to the hardware after a device
 * (re)initialization: ethertype, tunnel, flow director and rte_flow RSS
 * filters, in that order.
 */
static void
i40e_filter_restore(struct i40e_pf *pf)
{
	i40e_ethertype_filter_restore(pf);
	i40e_tunnel_filter_restore(pf);
	i40e_fdir_filter_restore(pf);
	i40e_rss_filter_restore(pf);
}
static bool
@ -11583,6 +11598,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
}
/**
 * Program or remove an rte_flow RSS configuration on the PF.
 *
 * @param pf
 *   Board private structure.
 * @param conf
 *   RSS configuration coming from the flow API.
 * @param add
 *   TRUE to program the configuration, FALSE to remove it.
 *
 * @return 0 on success, a negative errno-style value on failure.
 */
int
i40e_config_rss_filter(struct i40e_pf *pf,
		struct i40e_rte_flow_rss_conf *conf, bool add)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t i, lut = 0;
	uint16_t j, num;
	int ret;
	struct rte_eth_rss_conf rss_conf = conf->rss_conf;
	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;

	if (!add) {
		/* Only the exact stored configuration can be removed. */
		if (memcmp(conf, rss_info,
			sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
			i40e_pf_disable_rss(pf);
			memset(rss_info, 0,
				sizeof(struct i40e_rte_flow_rss_conf));
			return 0;
		}
		return -EINVAL;
	}

	/* Only one rte_flow RSS rule is supported at a time. */
	if (rss_info->num)
		return -EINVAL;

	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
	 * It's necessary to calculate the actual PF queues that are configured.
	 */
	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
		num = i40e_pf_calc_configured_queues_num(pf);
	else
		num = pf->dev_data->nb_rx_queues;

	num = RTE_MIN(num, conf->num);
	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
			num);

	if (num == 0) {
		PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
		return -ENOTSUP;
	}

	/* Fill in redirection table */
	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
		i40e_pf_disable_rss(pf);
		return 0;
	}
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Random default keys */
		static uint32_t rss_key_default[] = {0x6b793944,
			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
				sizeof(uint32_t);
	}

	/* BUGFIX: the original returned i40e_hw_rss_hash_set() directly,
	 * leaving the rte_memcpy() below unreachable, so pf->rss_info was
	 * never recorded and the rule could neither be removed nor restored.
	 * Program the hash configuration first and only cache the rule on
	 * success.
	 */
	ret = i40e_hw_rss_hash_set(pf, &rss_conf);
	if (ret)
		return ret;

	rte_memcpy(rss_info,
		conf, sizeof(struct i40e_rte_flow_rss_conf));

	return 0;
}
RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)

View File

@ -867,6 +867,13 @@ struct i40e_customized_pctype {
bool valid; /* Check if it's valid */
};
/* RSS configuration installed through the rte_flow API (filter type
 * RTE_ETH_FILTER_HASH). A copy of the active configuration is cached in
 * i40e_pf.rss_info so the rule can be matched on destroy and replayed on
 * device restore.
 */
struct i40e_rte_flow_rss_conf {
	struct rte_eth_rss_conf rss_conf; /**< RSS parameters (key and hash functions). */
	uint16_t queue_region_conf; /**< Set when the flow configures a queue region rather than plain RSS. */
	uint16_t num; /**< Number of valid entries in queue[]. */
	uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queue indices to spread traffic over. */
};
/*
* Structure to store private data specific for PF instance.
*/
@ -921,6 +928,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_rte_flow_rss_conf rss_info; /* rss info */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@ -1047,6 +1055,7 @@ union i40e_filter_t {
struct i40e_fdir_filter_conf fdir_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
struct i40e_rte_flow_rss_conf rss_conf;
};
typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@ -1177,6 +1186,8 @@ void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
int i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)

View File

@ -110,6 +110,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@ -4108,6 +4110,316 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
return ret;
}
/**
 * This function is used to configure existing i40e RSS with rte_flow.
 * It also enables queue region configuration using the flow API for i40e.
 * The pattern is used to indicate which parameters will be included in the
 * flow, such as user_priority or flowtype for queue region, or the hash
 * function for RSS. The action is used to pass parameters such as the queue
 * index and hash function for RSS, or the flowtype for queue region
 * configuration.
 * For example:
 * pattern:
 * Case 1: only ETH, indicates the flowtype for queue region will be parsed.
 * Case 2: only VLAN, indicates the user_priority for queue region will be
 * parsed.
 * Case 3: none, indicates RSS-related parameters will be parsed in the action.
 * Any pattern item other than ETH or VLAN is treated as invalid, except END.
 * So the pattern choice depends on the purpose of the configuration of
 * that flow.
 * action:
 * The RSS action is used to pass the valid parameters via
 * struct rte_flow_action_rss for all three cases.
 */
/**
 * Parse the pattern part of an RSS/queue-region flow.
 *
 * Sets *action_flag when an ETH item is present (the action's rss_hf is then
 * interpreted as a queue-region flowtype) and, for a fully-masked VLAN item,
 * records the PCP bits of the TCI as the queue-region user priority.
 *
 * @return 0 on success, -rte_errno on invalid pattern items.
 */
static int
i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
			const struct rte_flow_item *pattern,
			struct rte_flow_error *error,
			uint8_t *action_flag,
			struct i40e_queue_regions *info)
{
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;

	/* An empty pattern means plain RSS: everything comes from the action. */
	if (item->type == RTE_FLOW_ITEM_TYPE_END)
		return 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			*action_flag = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (vlan_spec && vlan_mask) {
				if (vlan_mask->tci ==
					rte_cpu_to_be_16(I40E_TCI_MASK)) {
					/* BUGFIX: tci is big-endian on the
					 * wire; convert to CPU order before
					 * extracting the 3 PCP bits, else
					 * the wrong bits are read on
					 * little-endian hosts.
					 */
					info->region[0].user_priority[0] =
						(rte_be_to_cpu_16(
						vlan_spec->tci) >> 13) & 0x7;
					info->region[0].user_priority_num = 1;
					info->queue_region_number = 1;
					*action_flag = 0;
				}
			}
			break;
		default:
			/* BUGFIX: the original reused the "Not support
			 * range" message here; report the real cause.
			 */
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not supported item type");
			return -rte_errno;
		}
	}

	return 0;
}
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
uint8_t *action_flag,
struct i40e_queue_regions *conf_info,
union i40e_filter_t *filter)
{
const struct rte_flow_action *act;
const struct rte_flow_action_rss *rss;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_queue_regions *info = &pf->queue_region;
struct i40e_rte_flow_rss_conf *rss_config =
&filter->rss_conf;
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
uint16_t i, j, n, tmp;
uint32_t index = 0;
NEXT_ITEM_OF_ACTION(act, actions, index);
rss = (const struct rte_flow_action_rss *)act->conf;
/**
* rss only supports forwarding,
* check if the first not void action is RSS.
*/
if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
return -rte_errno;
}
if (action_flag) {
for (n = 0; n < 64; n++) {
if (rss->rss_conf->rss_hf & (1 << n)) {
conf_info->region[0].hw_flowtype[0] = n;
conf_info->region[0].flowtype_num = 1;
conf_info->queue_region_number = 1;
break;
}
}
}
for (n = 0; n < conf_info->queue_region_number; n++) {
if (conf_info->region[n].user_priority_num ||
conf_info->region[n].flowtype_num) {
if (!((rte_is_power_of_2(rss->num)) &&
rss->num <= 64)) {
PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
"total number of queues do not exceed the VSI allocation");
return -rte_errno;
}
if (conf_info->region[n].user_priority[n] >=
I40E_MAX_USER_PRIORITY) {
PMD_DRV_LOG(ERR, "the user priority max index is 7");
return -rte_errno;
}
if (conf_info->region[n].hw_flowtype[n] >=
I40E_FILTER_PCTYPE_MAX) {
PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
return -rte_errno;
}
if (rss_info->num < rss->num ||
rss_info->queue[0] < rss->queue[0] ||
(rss->queue[0] + rss->num >
rss_info->num + rss_info->queue[0])) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
"no valid queues");
return -rte_errno;
}
for (i = 0; i < info->queue_region_number; i++) {
if (info->region[i].queue_num == rss->num &&
info->region[i].queue_start_index ==
rss->queue[0])
break;
}
if (i == info->queue_region_number) {
if (i > I40E_REGION_MAX_INDEX) {
PMD_DRV_LOG(ERR, "the queue region max index is 7");
return -rte_errno;
}
info->region[i].queue_num =
rss->num;
info->region[i].queue_start_index =
rss->queue[0];
info->region[i].region_id =
info->queue_region_number;
j = info->region[i].user_priority_num;
tmp = conf_info->region[n].user_priority[0];
if (conf_info->region[n].user_priority_num) {
info->region[i].user_priority[j] = tmp;
info->region[i].user_priority_num++;
}
j = info->region[i].flowtype_num;
tmp = conf_info->region[n].hw_flowtype[0];
if (conf_info->region[n].flowtype_num) {
info->region[i].hw_flowtype[j] = tmp;
info->region[i].flowtype_num++;
}
info->queue_region_number++;
} else {
j = info->region[i].user_priority_num;
tmp = conf_info->region[n].user_priority[0];
if (conf_info->region[n].user_priority_num) {
info->region[i].user_priority[j] = tmp;
info->region[i].user_priority_num++;
}
j = info->region[i].flowtype_num;
tmp = conf_info->region[n].hw_flowtype[0];
if (conf_info->region[n].flowtype_num) {
info->region[i].hw_flowtype[j] = tmp;
info->region[i].flowtype_num++;
}
}
}
rss_config->queue_region_conf = TRUE;
return 0;
}
if (!rss || !rss->num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
"no valid queues");
return -rte_errno;
}
for (n = 0; n < rss->num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
"queue id > max number of queues");
return -rte_errno;
}
}
if (rss->rss_conf)
rss_config->rss_conf = *rss->rss_conf;
else
rss_config->rss_conf.rss_hf =
pf->adapter->flow_types_mask;
for (n = 0; n < rss->num; ++n)
rss_config->queue[n] = rss->queue[n];
rss_config->num = rss->num;
index++;
/* check if the next not void action is END */
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
return -rte_errno;
}
rss_config->queue_region_conf = FALSE;
return 0;
}
static int
i40e_parse_rss_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
union i40e_filter_t *filter,
struct rte_flow_error *error)
{
int ret;
struct i40e_queue_regions info;
uint8_t action_flag = 0;
memset(&info, 0, sizeof(struct i40e_queue_regions));
ret = i40e_flow_parse_rss_pattern(dev, pattern,
error, &action_flag, &info);
if (ret)
return ret;
ret = i40e_flow_parse_rss_action(dev, actions, error,
&action_flag, &info, filter);
if (ret)
return ret;
ret = i40e_flow_parse_attr(attr, error);
if (ret)
return ret;
cons_filter_type = RTE_ETH_FILTER_HASH;
return 0;
}
/**
 * Apply a parsed flow-API RSS configuration: program a queue region when
 * conf->queue_region_conf is set, otherwise a plain RSS filter.
 *
 * @return 0 on success, a negative errno-style value on failure.
 */
static int
i40e_config_rss_filter_set(struct rte_eth_dev *dev,
		struct i40e_rte_flow_rss_conf *conf)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	if (conf->queue_region_conf) {
		/* BUGFIX: propagate the configuration return code instead
		 * of unconditionally reporting success.
		 */
		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
		/* The queue-region request has been consumed either way. */
		conf->queue_region_conf = 0;
	} else {
		ret = i40e_config_rss_filter(pf, conf, 1);
	}

	return ret;
}
/**
 * Remove a flow-API RSS configuration: clear the queue region setup and
 * remove the RSS filter.
 *
 * @return 0 on success, the first non-zero error code otherwise.
 */
static int
i40e_config_rss_filter_del(struct rte_eth_dev *dev,
		struct i40e_rte_flow_rss_conf *conf)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret, rss_ret;

	/* BUGFIX: the original discarded both return codes and always
	 * reported success; still attempt both removals, but report the
	 * first failure to the caller.
	 */
	ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
	rss_ret = i40e_config_rss_filter(pf, conf, 0);

	if (ret == 0)
		ret = rss_ret;

	return ret;
}
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@ -4144,6 +4456,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
memset(&cons_filter, 0, sizeof(cons_filter));
/* Get the non-void item of action */
while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
i++;
if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
ret = i40e_parse_rss_filter(dev, attr, pattern,
actions, &cons_filter, error);
return ret;
}
i = 0;
/* Get the non-void item number of pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@ -4231,6 +4554,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
i40e_tunnel_filter_list);
break;
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_set(dev,
&cons_filter.rss_conf);
flow->rule = &pf->rss_info;
break;
default:
goto free_flow;
}
@ -4269,6 +4597,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_del(dev,
(struct i40e_rte_flow_rss_conf *)flow->rule);
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@ -4411,6 +4743,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
ret = i40e_flow_flush_rss_filter(dev);
if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush rss flows.");
return -rte_errno;
}
return ret;
}
@ -4506,3 +4846,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
/* Remove the rte_flow RSS configuration during a flow flush: clear any
 * queue-region setup, then remove the RSS filter if one is installed.
 * Returns 0 on success, the first non-zero error code otherwise.
 */
static int
i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int32_t ret;

	/* BUGFIX: the original let a successful RSS removal overwrite a
	 * queue-region flush failure (and carried a dead -EINVAL
	 * initializer); report the first error encountered.
	 */
	ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);

	if (rss_info->num) {
		int32_t rss_ret = i40e_config_rss_filter(pf, rss_info, FALSE);

		if (ret == 0)
			ret = rss_ret;
	}

	return ret;
}