net/e1000: move RSS to flow API

rte_flow was defined to include RSS. This patch moves igb's
existing RSS functionality into rte_flow. The old RSS
configuration method is kept as-is and may be deprecated in
the future.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Acked-by: Wei Dai <wei.dai@intel.com>
This commit is contained in:
Wei Zhao 2018-01-09 14:44:29 +08:00 committed by Ferruh Yigit
parent cdcd6b686e
commit 424ae915ba
5 changed files with 264 additions and 0 deletions

View File

@ -55,6 +55,12 @@ New Features
* Added tunneled packets classification.
* Added inner checksum offload.
* **Added the igb ethernet driver to support RSS with flow API.**
  rte_flow was defined to include RSS, but until now RSS has been outside of
  rte_flow. This patch adds support for the existing RSS configuration of igb
  NICs through the rte_flow API.
API Changes
-----------

View File

@ -228,6 +228,12 @@ struct igb_ethertype_filter {
uint32_t etqf;
};
/**
 * RSS configuration carried by an rte_flow RSS rule: the hash parameters
 * plus the explicit list of Rx queues the rule spreads traffic over.
 * Stored in e1000_filter_info so the rule can be restored after a restart.
 */
struct igb_rte_flow_rss_conf {
	struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
	uint16_t num; /**< Number of entries in queue[]. */
	uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
/*
* Structure to store filters'info.
*/
@ -245,6 +251,8 @@ struct e1000_filter_info {
struct e1000_2tuple_filter_list twotuple_list;
/* store the SYN filter info */
uint32_t syn_info;
/* store the rss filter info */
struct igb_rte_flow_rss_conf rss_info;
};
/*
@ -313,6 +321,12 @@ struct igb_flex_filter_ele {
struct rte_eth_flex_filter filter_info;
};
/* rss filter list structure */
/* rss filter list structure: one node per RSS flow rule created through
 * the rte_flow API, linked into igb_filter_rss_list.
 */
struct igb_rss_conf_ele {
	TAILQ_ENTRY(igb_rss_conf_ele) entries;
	struct igb_rte_flow_rss_conf filter_info; /**< Saved RSS rule config. */
};
/* igb_flow memory list structure */
struct igb_flow_mem {
TAILQ_ENTRY(igb_flow_mem) entries;
@ -328,6 +342,8 @@ TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
struct igb_syn_filter_list igb_filter_syn_list;
TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
struct igb_flex_filter_list igb_filter_flex_list;
TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
struct igb_rss_filter_list igb_filter_rss_list;
TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
struct igb_flow_mem_list igb_flow_list;
@ -471,4 +487,8 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add);
int igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf,
bool add);
#endif /* _E1000_ETHDEV_H_ */

View File

@ -919,6 +919,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
TAILQ_INIT(&igb_filter_ethertype_list);
TAILQ_INIT(&igb_filter_syn_list);
TAILQ_INIT(&igb_filter_flex_list);
TAILQ_INIT(&igb_filter_rss_list);
TAILQ_INIT(&igb_flow_list);
return 0;
@ -978,6 +979,10 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
memset(filter_info->ethertype_filters, 0,
E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
/* clear the rss filter info */
memset(&filter_info->rss_info, 0,
sizeof(struct igb_rte_flow_rss_conf));
/* remove all ntuple filters of the device */
igb_ntuple_filter_uninit(eth_dev);
@ -5599,6 +5604,17 @@ igb_flex_filter_restore(struct rte_eth_dev *dev)
}
}
/* restore rss filter */
/* restore rss filter: re-apply the saved rte_flow RSS configuration
 * (redirection table, hash key and hash types) after a device restart.
 */
static inline void
igb_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	/* A zero queue count means no RSS flow rule is currently stored. */
	if (info->rss_info.num == 0)
		return;

	igb_config_rss_filter(dev, &info->rss_info, TRUE);
}
/* restore all types filter */
static int
igb_filter_restore(struct rte_eth_dev *dev)
@ -5607,6 +5623,7 @@ igb_filter_restore(struct rte_eth_dev *dev)
igb_ethertype_filter_restore(dev);
igb_syn_filter_restore(dev);
igb_flex_filter_restore(dev);
igb_rss_filter_restore(dev);
return 0;
}

View File

@ -1266,6 +1266,101 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
return 0;
}
/**
 * Parse an rte_flow RSS rule (attributes + actions; RSS rules carry no
 * pattern items) into an igb_rte_flow_rss_conf.
 *
 * @param dev       Port whose queue count bounds the requested queue ids.
 * @param attr      Flow attributes; only ingress, priority 0, is accepted.
 * @param actions   Action list; must be exactly one RSS action then END.
 * @param rss_conf  Output; zeroed again on most error paths.
 * @param error     rte_flow error reporting structure.
 * @return 0 on success, -rte_errno on failure.
 */
static int
igb_parse_rss_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct igb_rte_flow_rss_conf *rss_conf,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	uint16_t n, index;

	/*
	 * rss only supports forwarding,
	 * check if the first not void action is RSS.
	 */
	index = 0;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rss = (const struct rte_flow_action_rss *)act->conf;

	if (!rss || !rss->num) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"no valid queues");
		return -rte_errno;
	}

	/* Fix: bound the queue-list length, otherwise copying rss->num
	 * entries below overflows rss_conf->queue[IGB_MAX_RX_QUEUE_NUM].
	 */
	if (rss->num > RTE_DIM(rss_conf->queue)) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"too many queues for RSS context");
		return -rte_errno;
	}

	for (n = 0; n < rss->num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "queue id > max number of queues");
			return -rte_errno;
		}
	}

	/* No explicit hash config in the action: fall back to all
	 * hash types the igb hardware supports.
	 */
	if (rss->rss_conf)
		rss_conf->rss_conf = *rss->rss_conf;
	else
		rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;

	for (n = 0; n < rss->num; ++n)
		rss_conf->queue[n] = rss->queue[n];
	rss_conf->num = rss->num;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		/* Fix: zero the whole igb_rte_flow_rss_conf; the original
		 * used sizeof(struct rte_eth_rss_conf) and left num/queue[]
		 * populated on this error path.
		 */
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}

	return 0;
}
/**
* Create a flow rule.
* Theorically one rule can match more than one filters.
@ -1284,11 +1379,13 @@ igb_flow_create(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct rte_eth_flex_filter flex_filter;
struct igb_rte_flow_rss_conf rss_conf;
struct rte_flow *flow = NULL;
struct igb_ntuple_filter_ele *ntuple_filter_ptr;
struct igb_ethertype_filter_ele *ethertype_filter_ptr;
struct igb_eth_syn_filter_ele *syn_filter_ptr;
struct igb_flex_filter_ele *flex_filter_ptr;
struct igb_rss_conf_ele *rss_filter_ptr;
struct igb_flow_mem *igb_flow_mem_ptr;
flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
@ -1390,6 +1487,29 @@ igb_flow_create(struct rte_eth_dev *dev,
}
}
memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
ret = igb_parse_rss_filter(dev, attr,
actions, &rss_conf, error);
if (!ret) {
ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
if (!ret) {
rss_filter_ptr = rte_zmalloc("igb_rss_filter",
sizeof(struct igb_rss_conf_ele), 0);
if (!rss_filter_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
rte_memcpy(&rss_filter_ptr->filter_info,
&rss_conf,
sizeof(struct igb_rte_flow_rss_conf));
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_HASH;
return flow;
}
}
out:
TAILQ_REMOVE(&igb_flow_list,
igb_flow_mem_ptr, entries);
@ -1417,6 +1537,7 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct rte_eth_flex_filter flex_filter;
struct igb_rte_flow_rss_conf rss_conf;
int ret;
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@ -1440,6 +1561,12 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
ret = igb_parse_flex_filter(dev, attr, pattern,
actions, &flex_filter, error);
if (!ret)
return 0;
memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
ret = igb_parse_rss_filter(dev, attr,
actions, &rss_conf, error);
return ret;
}
@ -1458,6 +1585,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
struct igb_eth_syn_filter_ele *syn_filter_ptr;
struct igb_flex_filter_ele *flex_filter_ptr;
struct igb_flow_mem *igb_flow_mem_ptr;
struct igb_rss_conf_ele *rss_filter_ptr;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@ -1504,6 +1632,17 @@ igb_flow_destroy(struct rte_eth_dev *dev,
rte_free(flex_filter_ptr);
}
break;
case RTE_ETH_FILTER_HASH:
rss_filter_ptr = (struct igb_rss_conf_ele *)
pmd_flow->rule;
ret = igb_config_rss_filter(dev,
&rss_filter_ptr->filter_info, FALSE);
if (!ret) {
TAILQ_REMOVE(&igb_filter_rss_list,
rss_filter_ptr, entries);
rte_free(rss_filter_ptr);
}
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@ -1592,6 +1731,17 @@ igb_clear_all_flex_filter(struct rte_eth_dev *dev)
igb_remove_flex_filter(dev, flex_filter);
}
/* remove the rss filter */
/* remove the rss filter: tear down the active rte_flow RSS configuration
 * in hardware, if one is stored, by disabling RSS on the port.
 */
static void
igb_clear_rss_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	/* Nothing stored means no RSS flow rule to remove. */
	if (filter_info->rss_info.num == 0)
		return;

	igb_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
void
igb_filterlist_flush(struct rte_eth_dev *dev)
{
@ -1599,6 +1749,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
struct igb_ethertype_filter_ele *ethertype_filter_ptr;
struct igb_eth_syn_filter_ele *syn_filter_ptr;
struct igb_flex_filter_ele *flex_filter_ptr;
struct igb_rss_conf_ele *rss_filter_ptr;
struct igb_flow_mem *igb_flow_mem_ptr;
enum rte_filter_type filter_type;
struct rte_flow *pmd_flow;
@ -1641,6 +1792,14 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
flex_filter_ptr, entries);
rte_free(flex_filter_ptr);
break;
case RTE_ETH_FILTER_HASH:
rss_filter_ptr =
(struct igb_rss_conf_ele *)
pmd_flow->rule;
TAILQ_REMOVE(&igb_filter_rss_list,
rss_filter_ptr, entries);
rte_free(rss_filter_ptr);
break;
default:
PMD_DRV_LOG(WARNING, "Filter type"
"(%d) not supported", filter_type);
@ -1664,6 +1823,7 @@ igb_flow_flush(struct rte_eth_dev *dev,
igb_clear_all_ethertype_filter(dev);
igb_clear_syn_filter(dev);
igb_clear_all_flex_filter(dev);
igb_clear_rss_filter(dev);
igb_filterlist_flush(dev);
return 0;

View File

@ -2757,3 +2757,64 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_thresh.hthresh = txq->hthresh;
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
}
/**
 * Program (add == TRUE) or tear down (add == FALSE) the rte_flow RSS
 * configuration: redirection table, hash key and enabled hash types.
 *
 * Only one RSS flow rule may be active at a time; removal requires the
 * supplied conf to match the stored one exactly.
 *
 * @return 0 on success, -EINVAL if a rule is already active (add) or the
 *         conf does not match the stored rule (remove).
 */
int
igb_config_rss_filter(struct rte_eth_dev *dev,
		struct igb_rte_flow_rss_conf *conf, bool add)
{
	uint32_t shift;
	uint16_t i, j;
	struct rte_eth_rss_conf rss_conf = conf->rss_conf;
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	/* Fix: hw was initialized and then immediately reassigned with the
	 * same expression; a single initialization suffices.
	 */
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!add) {
		if (memcmp(conf, &filter_info->rss_info,
			sizeof(struct igb_rte_flow_rss_conf)) == 0) {
			igb_rss_disable(dev);
			memset(&filter_info->rss_info, 0,
				sizeof(struct igb_rte_flow_rss_conf));
			return 0;
		}
		return -EINVAL;
	}

	/* Only a single RSS flow rule may be stored at a time. */
	if (filter_info->rss_info.num)
		return -EINVAL;

	/* Fill in redirection table. */
	/* 82575 places the queue index in a shifted position in RETA. */
	shift = (hw->mac.type == e1000_82575) ? 6 : 0;
	for (i = 0, j = 0; i < 128; i++, j++) {
		union e1000_reta {
			uint32_t dword;
			uint8_t  bytes[4];
		} reta;
		uint8_t q_idx;

		/* Fix: wrap j BEFORE reading; the original read
		 * conf->queue[j] first, so iteration j == conf->num used the
		 * stale entry queue[num] instead of queue[0].
		 */
		if (j == conf->num)
			j = 0;
		q_idx = conf->queue[j];
		reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
		if ((i & 3) == 3)
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
	}

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
		igb_rss_disable(dev);
		return 0;
	}
	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = rss_intel_key; /* Default hash key */
	igb_hw_rss_hash_set(hw, &rss_conf);

	rte_memcpy(&filter_info->rss_info,
		conf, sizeof(struct igb_rte_flow_rss_conf));

	return 0;
}