ethdev: flatten RSS configuration in flow API

Since its inception, the rte_flow RSS action has been relying in part on
external struct rte_eth_rss_conf for compatibility with the legacy RSS API.
This structure lacks parameters such as the hash algorithm to use, and more
recently, a method to tell which layer RSS should be performed on [1].

Given struct rte_eth_rss_conf will never be flexible enough to represent a
complete RSS configuration (e.g. RETA table), this patch supersedes it by
extending the rte_flow RSS action directly.

A subsequent patch will add a field to use a non-default RSS hash
algorithm. To that end, a field named "types" replaces the field formerly
known as "rss_hf" and standing for "RSS hash functions" as it was
confusing. Actual RSS hash function types are defined by enum
rte_eth_hash_function.

This patch updates all PMDs and example applications accordingly.

It breaks ABI compatibility for the following public functions:

- rte_flow_copy()
- rte_flow_create()
- rte_flow_query()
- rte_flow_validate()

[1] commit 676b605182 ("doc: announce ethdev API change for RSS
    configuration")

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
This commit is contained in:
Adrien Mazarguil 2018-04-25 17:27:50 +02:00 committed by Ferruh Yigit
parent 19b3bc47c6
commit ac8d22de23
29 changed files with 494 additions and 360 deletions

View File

@ -192,9 +192,8 @@ enum index {
/** Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
struct rte_flow_action_rss conf;
uint8_t key[RSS_HASH_KEY_LENGTH];
uint16_t queue[ACTION_RSS_QUEUE_NUM];
struct rte_eth_rss_conf rss_conf;
uint8_t rss_key[RSS_HASH_KEY_LENGTH];
};
/** Maximum number of subsequent tokens and arguments on the stack. */
@ -1587,7 +1586,7 @@ static const struct token token_list[] = {
},
[ACTION_RSS_TYPES] = {
.name = "types",
.help = "RSS hash types",
.help = "specific RSS hash types",
.next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
},
[ACTION_RSS_TYPE] = {
@ -1602,21 +1601,21 @@ static const struct token token_list[] = {
.next = NEXT(action_rss, NEXT_ENTRY(STRING)),
.args = ARGS(ARGS_ENTRY_ARB(0, 0),
ARGS_ENTRY_ARB
(offsetof(struct action_rss_data, rss_conf) +
offsetof(struct rte_eth_rss_conf, rss_key_len),
sizeof(((struct rte_eth_rss_conf *)0)->
rss_key_len)),
ARGS_ENTRY(struct action_rss_data, rss_key)),
(offsetof(struct action_rss_data, conf) +
offsetof(struct rte_flow_action_rss, key_len),
sizeof(((struct rte_flow_action_rss *)0)->
key_len)),
ARGS_ENTRY(struct action_rss_data, key)),
},
[ACTION_RSS_KEY_LEN] = {
.name = "key_len",
.help = "RSS hash key length in bytes",
.next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
.args = ARGS(ARGS_ENTRY_ARB_BOUNDED
(offsetof(struct action_rss_data, rss_conf) +
offsetof(struct rte_eth_rss_conf, rss_key_len),
sizeof(((struct rte_eth_rss_conf *)0)->
rss_key_len),
(offsetof(struct action_rss_data, conf) +
offsetof(struct rte_flow_action_rss, key_len),
sizeof(((struct rte_flow_action_rss *)0)->
key_len),
0,
RSS_HASH_KEY_LENGTH)),
},
@ -2075,27 +2074,24 @@ parse_vc_action_rss(struct context *ctx, const struct token *token,
action_rss_data = ctx->object;
*action_rss_data = (struct action_rss_data){
.conf = (struct rte_flow_action_rss){
.rss_conf = &action_rss_data->rss_conf,
.num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
.types = rss_hf,
.key_len = sizeof(action_rss_data->key),
.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
.key = action_rss_data->key,
.queue = action_rss_data->queue,
},
.key = "testpmd's default RSS hash key",
.queue = { 0 },
.rss_conf = (struct rte_eth_rss_conf){
.rss_key = action_rss_data->rss_key,
.rss_key_len = sizeof(action_rss_data->rss_key),
.rss_hf = rss_hf,
},
.rss_key = "testpmd's default RSS hash key",
};
for (i = 0; i < action_rss_data->conf.num; ++i)
for (i = 0; i < action_rss_data->conf.queue_num; ++i)
action_rss_data->queue[i] = i;
if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
ctx->port != (portid_t)RTE_PORT_ALL) {
struct rte_eth_dev_info info;
rte_eth_dev_info_get(ctx->port, &info);
action_rss_data->rss_conf.rss_key_len =
RTE_MIN(sizeof(action_rss_data->rss_key),
action_rss_data->conf.key_len =
RTE_MIN(sizeof(action_rss_data->key),
info.hash_key_size);
}
action->conf = &action_rss_data->conf;
@ -2123,7 +2119,7 @@ parse_vc_action_rss_type(struct context *ctx, const struct token *token,
return -1;
if (!(ctx->objdata >> 16) && ctx->object) {
action_rss_data = ctx->object;
action_rss_data->rss_conf.rss_hf = 0;
action_rss_data->conf.types = 0;
}
if (!strcmp_partial("end", str, len)) {
ctx->objdata &= 0xffff;
@ -2142,7 +2138,7 @@ parse_vc_action_rss_type(struct context *ctx, const struct token *token,
if (!ctx->object)
return len;
action_rss_data = ctx->object;
action_rss_data->rss_conf.rss_hf |= rss_type_table[i].rss_type;
action_rss_data->conf.types |= rss_type_table[i].rss_type;
return len;
}
@ -2192,7 +2188,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
if (!ctx->object)
return len;
action_rss_data = ctx->object;
action_rss_data->conf.num = i;
action_rss_data->conf.queue_num = i;
action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
return len;
}

View File

@ -1117,42 +1117,29 @@ flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
off = 0;
if (dst.rss)
*dst.rss = (struct rte_flow_action_rss){
.num = src.rss->num,
.types = src.rss->types,
.key_len = src.rss->key_len,
.queue_num = src.rss->queue_num,
};
off += sizeof(*src.rss);
if (src.rss->num) {
if (src.rss->key_len) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
size = sizeof(*src.rss->queue) * src.rss->num;
size = sizeof(*src.rss->key) * src.rss->key_len;
if (dst.rss)
dst.rss->key = memcpy
((void *)((uintptr_t)dst.rss + off),
src.rss->key, size);
off += size;
}
if (src.rss->queue_num) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
size = sizeof(*src.rss->queue) * src.rss->queue_num;
if (dst.rss)
dst.rss->queue = memcpy
((void *)((uintptr_t)dst.rss + off),
src.rss->queue, size);
off += size;
}
off = RTE_ALIGN_CEIL(off, sizeof(double));
if (dst.rss) {
dst.rss->rss_conf = (void *)((uintptr_t)dst.rss + off);
*(struct rte_eth_rss_conf *)(uintptr_t)
dst.rss->rss_conf = (struct rte_eth_rss_conf){
.rss_key_len = src.rss->rss_conf->rss_key_len,
.rss_hf = src.rss->rss_conf->rss_hf,
};
}
off += sizeof(*src.rss->rss_conf);
if (src.rss->rss_conf->rss_key_len) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
size = sizeof(*src.rss->rss_conf->rss_key) *
src.rss->rss_conf->rss_key_len;
if (dst.rss) {
((struct rte_eth_rss_conf *)(uintptr_t)
dst.rss->rss_conf)->rss_key =
(void *)((uintptr_t)dst.rss + off);
memcpy(dst.rss->rss_conf->rss_key,
src.rss->rss_conf->rss_key,
size);
}
off += size;
}
size = off;
break;
default:

View File

@ -1301,6 +1301,12 @@ Action: ``RSS``
Similar to QUEUE, except RSS is additionally performed on packets to spread
them among several queues according to the provided parameters.
Unlike global RSS settings used by other DPDK APIs, unsetting the ``types``
field does not disable RSS in a flow rule. Doing so instead requests safe
unspecified "best-effort" settings from the underlying PMD, which depending
on the flow rule, may result in anything ranging from empty (single queue)
to all-inclusive RSS.
Note: RSS hash result is stored in the ``hash.rss`` mbuf field which
overlaps ``hash.fdir.lo``. Since `Action: MARK`_ sets the ``hash.fdir.hi``
field only, both can be requested simultaneously.
@ -1309,15 +1315,19 @@ field only, both can be requested simultaneously.
.. table:: RSS
+--------------+--------------------------------+
| Field | Value |
+==============+================================+
| ``rss_conf`` | RSS parameters |
+--------------+--------------------------------+
| ``num`` | number of entries in ``queue`` |
+--------------+--------------------------------+
| ``queue`` | queue indices to use |
+--------------+--------------------------------+
+---------------+---------------------------------------------+
| Field | Value |
+===============+=============================================+
| ``types`` | specific RSS hash types (see ``ETH_RSS_*``) |
+---------------+---------------------------------------------+
| ``key_len`` | hash key length in bytes |
+---------------+---------------------------------------------+
| ``queue_num`` | number of entries in ``queue`` |
+---------------+---------------------------------------------+
| ``key`` | hash key |
+---------------+---------------------------------------------+
| ``queue`` | queue indices to use |
+---------------+---------------------------------------------+
Action: ``PF``
^^^^^^^^^^^^^^

View File

@ -269,6 +269,13 @@ API Changes
present.
* C99-style flexible arrays were replaced with standard pointers in RSS
action and in RAW pattern item structures due to compatibility issues.
* The RSS action was modified to not rely on external
``struct rte_eth_rss_conf`` anymore to instead expose its own and more
appropriately named configuration fields directly
(``rss_conf->rss_key`` => ``key``,
``rss_conf->rss_key_len`` => ``key_len``,
``rss_conf->rss_hf`` => ``types``,
``num`` => ``queue_num``).
ABI Changes
@ -317,9 +324,9 @@ ABI Changes
``rte_flow_isolate``, ``rte_flow_query`` and ``rte_flow_validate``, due to
changes in error type definitions (``enum rte_flow_error_type``), removal
of the unused DUP action (``enum rte_flow_action_type``), modified
behavior for flow rule actions (see API changes) and removal of C99
flexible arrays from RSS action (``struct rte_flow_action_rss``) and RAW
pattern item (``struct rte_flow_item_raw``).
behavior for flow rule actions (see API changes), removal of C99 flexible
array from RAW pattern item (``struct rte_flow_item_raw``) and complete
rework of the RSS action definition (``struct rte_flow_action_rss``).
Removed Items

View File

@ -3422,8 +3422,10 @@ This section lists supported actions and their attributes, if any.
- ``rss``: spread packets among several queues.
- ``types [{RSS hash type} [...]] end``: RSS hash types, allowed tokens
are the same as `set_hash_input_set`_, an empty list means none (0).
- ``types [{RSS hash type} [...]] end``: specific RSS hash types, allowed
tokens are the same as `set_hash_input_set`_, except that an empty list
does not disable RSS but instead requests unspecified "best-effort"
settings.
- ``key {string}``: RSS hash key, overrides ``key_len``.

View File

@ -4,6 +4,10 @@
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
#include <stdint.h>
#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>
@ -27,6 +31,7 @@
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128
#define IGB_HKEY_MAX_INDEX 10
#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16
@ -229,8 +234,8 @@ struct igb_ethertype_filter {
};
/** Self-contained copy of an rte_flow RSS action for an igb RSS filter. */
struct igb_rte_flow_rss_conf {
struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
uint16_t num; /**< Number of entries in queue[]. */
struct rte_flow_action_rss conf; /**< RSS parameters; key/queue point into the arrays below. */
uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /**< Hash key storage (IGB_HKEY_MAX_INDEX = 10 words, 40 bytes). */
uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
};
@ -501,6 +506,10 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add);
int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
int igb_action_rss_same(const struct rte_flow_action_rss *comp,
const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf,
bool add);

View File

@ -41,8 +41,6 @@
#define IGB_DEFAULT_TX_HTHRESH 1
#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
#define IGB_HKEY_MAX_INDEX 10
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
@ -5662,7 +5660,7 @@ igb_rss_filter_restore(struct rte_eth_dev *dev)
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
if (filter_info->rss_info.num)
if (filter_info->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}

View File

@ -1292,7 +1292,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
rss = (const struct rte_flow_action_rss *)act->conf;
if (!rss || !rss->num) {
if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@ -1300,7 +1300,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
for (n = 0; n < rss->num; n++) {
for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -1310,14 +1310,18 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
}
}
if (rss->rss_conf)
rss_conf->rss_conf = *rss->rss_conf;
else
rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
for (n = 0; n < rss->num; ++n)
rss_conf->queue[n] = rss->queue[n];
rss_conf->num = rss->num;
if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS hash key must be exactly 40 bytes");
if (rss->queue_num > RTE_DIM(rss_conf->queue))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"too many queues for RSS context");
if (igb_rss_conf_init(rss_conf, rss))
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS context initialization failure");
/* check if the next not void item is END */
index++;
@ -1518,9 +1522,8 @@ igb_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
rte_memcpy(&rss_filter_ptr->filter_info,
&rss_conf,
sizeof(struct igb_rte_flow_rss_conf));
igb_rss_conf_init(&rss_filter_ptr->filter_info,
&rss_conf.conf);
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
@ -1757,7 +1760,7 @@ igb_clear_rss_filter(struct rte_eth_dev *dev)
struct e1000_filter_info *filter =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
if (filter->rss_info.num)
if (filter->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter->rss_info, FALSE);
}

View File

@ -2897,13 +2897,48 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.offloads = txq->offloads;
}
/**
 * Make a self-contained copy of an RSS action configuration.
 *
 * Scalar fields are copied as-is; the key and queue arrays are duplicated
 * into the storage embedded in @p out so the result no longer references
 * caller-owned memory.
 *
 * @param out
 *   Destination RSS configuration (embedded storage is overwritten).
 * @param in
 *   Source RSS action configuration.
 *
 * @return
 *   0 on success, -EINVAL if either array exceeds the embedded storage.
 */
int
igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
		  const struct rte_flow_action_rss *in)
{
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss){
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
		/*
		 * in->key (or in->queue) may be NULL when the corresponding
		 * length is 0; memcpy() with a NULL source is undefined
		 * behavior even for a zero length, so only copy when there
		 * is actual data.
		 */
		.key = in->key_len ?
			memcpy(out->key, in->key, in->key_len) : NULL,
		.queue = in->queue_num ?
			memcpy(out->queue, in->queue,
			       sizeof(*in->queue) * in->queue_num) : NULL,
	};
	return 0;
}
/**
 * Compare two RSS action configurations for equality.
 *
 * @param comp
 *   First RSS configuration (typically the stored one).
 * @param with
 *   Second RSS configuration to compare against.
 *
 * @return
 *   Nonzero when both describe the same types, key and queue set.
 */
int
igb_action_rss_same(const struct rte_flow_action_rss *comp,
		    const struct rte_flow_action_rss *with)
{
	/*
	 * key/queue pointers may be NULL when the corresponding length is
	 * 0; memcmp() on a NULL pointer is undefined behavior even with a
	 * zero length, so skip the comparison for empty arrays (lengths
	 * are already known to match at that point).
	 */
	return (comp->types == with->types &&
		comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		(with->key_len == 0 ||
		 !memcmp(comp->key, with->key, with->key_len)) &&
		(with->queue_num == 0 ||
		 !memcmp(comp->queue, with->queue,
			 sizeof(*with->queue) * with->queue_num)));
}
int
igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf, bool add)
{
uint32_t shift;
uint16_t i, j;
struct rte_eth_rss_conf rss_conf = conf->rss_conf;
struct rte_eth_rss_conf rss_conf = {
.rss_key = conf->conf.key_len ?
(void *)(uintptr_t)conf->conf.key : NULL,
.rss_key_len = conf->conf.key_len,
.rss_hf = conf->conf.types,
};
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@ -2911,8 +2946,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!add) {
if (memcmp(conf, &filter_info->rss_info,
sizeof(struct igb_rte_flow_rss_conf)) == 0) {
if (igb_action_rss_same(&filter_info->rss_info.conf,
&conf->conf)) {
igb_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct igb_rte_flow_rss_conf));
@ -2921,7 +2956,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
if (filter_info->rss_info.num)
if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table. */
@ -2933,9 +2968,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
} reta;
uint8_t q_idx;
if (j == conf->num)
if (j == conf->conf.queue_num)
j = 0;
q_idx = conf->queue[j];
q_idx = conf->conf.queue[j];
reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
if ((i & 3) == 3)
E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
@ -2952,8 +2987,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
igb_hw_rss_hash_set(hw, &rss_conf);
rte_memcpy(&filter_info->rss_info,
conf, sizeof(struct igb_rte_flow_rss_conf));
if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
return -EINVAL;
return 0;
}

View File

@ -11,6 +11,7 @@
#include <inttypes.h>
#include <assert.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
@ -11650,7 +11651,7 @@ i40e_rss_filter_restore(struct i40e_pf *pf)
{
struct i40e_rte_flow_rss_conf *conf =
&pf->rss_info;
if (conf->num)
if (conf->conf.queue_num)
i40e_config_rss_filter(pf, conf, TRUE);
}
@ -12181,6 +12182,36 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
}
/**
 * Make a self-contained copy of an RSS action configuration.
 *
 * Scalar fields are copied as-is; the key and queue arrays are duplicated
 * into the storage embedded in @p out so the result no longer references
 * caller-owned memory.
 *
 * @param out
 *   Destination RSS configuration (embedded storage is overwritten).
 * @param in
 *   Source RSS action configuration.
 *
 * @return
 *   0 on success, -EINVAL if either array exceeds the embedded storage.
 */
int
i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
		   const struct rte_flow_action_rss *in)
{
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss){
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
		/*
		 * in->key (or in->queue) may be NULL when the corresponding
		 * length is 0; memcpy() with a NULL source is undefined
		 * behavior even for a zero length, so only copy when there
		 * is actual data.
		 */
		.key = in->key_len ?
			memcpy(out->key, in->key, in->key_len) : NULL,
		.queue = in->queue_num ?
			memcpy(out->queue, in->queue,
			       sizeof(*in->queue) * in->queue_num) : NULL,
	};
	return 0;
}
/**
 * Compare two RSS action configurations for equality.
 *
 * @param comp
 *   First RSS configuration (typically the stored one).
 * @param with
 *   Second RSS configuration to compare against.
 *
 * @return
 *   Nonzero when both describe the same types, key and queue set.
 */
int
i40e_action_rss_same(const struct rte_flow_action_rss *comp,
		     const struct rte_flow_action_rss *with)
{
	/*
	 * key/queue pointers may be NULL when the corresponding length is
	 * 0; memcmp() on a NULL pointer is undefined behavior even with a
	 * zero length, so skip the comparison for empty arrays (lengths
	 * are already known to match at that point).
	 */
	return (comp->types == with->types &&
		comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		(with->key_len == 0 ||
		 !memcmp(comp->key, with->key, with->key_len)) &&
		(with->queue_num == 0 ||
		 !memcmp(comp->queue, with->queue,
			 sizeof(*with->queue) * with->queue_num)));
}
int
i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add)
@ -12188,12 +12219,16 @@ i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t i, lut = 0;
uint16_t j, num;
struct rte_eth_rss_conf rss_conf = conf->rss_conf;
struct rte_eth_rss_conf rss_conf = {
.rss_key = conf->conf.key_len ?
(void *)(uintptr_t)conf->conf.key : NULL,
.rss_key_len = conf->conf.key_len,
.rss_hf = conf->conf.types,
};
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
if (!add) {
if (memcmp(conf, rss_info,
sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
i40e_pf_disable_rss(pf);
memset(rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
@ -12202,7 +12237,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
return -EINVAL;
}
if (rss_info->num)
if (rss_info->conf.queue_num)
return -EINVAL;
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
@ -12213,7 +12248,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
else
num = pf->dev_data->nb_rx_queues;
num = RTE_MIN(num, conf->num);
num = RTE_MIN(num, conf->conf.queue_num);
PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
num);
@ -12226,7 +12261,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
j = 0;
lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
hw->func_caps.rss_table_entry_width) - 1));
if ((i & 3) == 3)
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
@ -12251,8 +12286,8 @@ i40e_config_rss_filter(struct i40e_pf *pf,
i40e_hw_rss_hash_set(pf, &rss_conf);
rte_memcpy(rss_info,
conf, sizeof(struct i40e_rte_flow_rss_conf));
if (i40e_rss_conf_init(rss_info, &conf->conf))
return -EINVAL;
return 0;
}

View File

@ -5,14 +5,19 @@
#ifndef _I40E_ETHDEV_H_
#define _I40E_ETHDEV_H_
#include <stdint.h>
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tm_driver.h>
#include "rte_pmd_i40e.h"
#include "base/i40e_register.h"
#define I40E_VLAN_TAG_SIZE 4
#define I40E_AQ_LEN 32
@ -878,9 +883,11 @@ struct i40e_customized_pctype {
};
/** Self-contained copy of an rte_flow RSS action for an i40e RSS filter. */
struct i40e_rte_flow_rss_conf {
struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
struct rte_flow_action_rss conf; /**< RSS parameters; key/queue point into the arrays below. */
uint16_t queue_region_conf; /**< Queue region config flag */
uint16_t num; /**< Number of entries in queue[]. */
/*
 * Hash key storage sized for the larger of the VF/PF key register
 * counts. Note the parentheses around "max + 1": without them,
 * "+ 1 * sizeof(uint32_t)" binds as "max + 4" bytes instead of the
 * intended "(max + 1) * 4" bytes and undersizes the array.
 */
uint8_t key[((I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
	      I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX) + 1) *
	    sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
};
@ -1219,6 +1226,10 @@ void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
int i40e_action_rss_same(const struct rte_flow_action_rss *comp,
const struct rte_flow_action_rss *with);
int i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add);

View File

@ -4220,7 +4220,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (action_flag) {
for (n = 0; n < 64; n++) {
if (rss->rss_conf->rss_hf & (hf_bit << n)) {
if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
conf_info->region[0].flowtype_num = 1;
conf_info->queue_region_number = 1;
@ -4236,12 +4236,12 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
* queue index for this port.
*/
if (conf_info->queue_region_number) {
for (i = 0; i < rss->num; i++) {
for (j = 0; j < rss_info->num; j++) {
if (rss->queue[i] == rss_info->queue[j])
for (i = 0; i < rss->queue_num; i++) {
for (j = 0; j < rss_info->conf.queue_num; j++) {
if (rss->queue[i] == rss_info->conf.queue[j])
break;
}
if (j == rss_info->num) {
if (j == rss_info->conf.queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@ -4250,7 +4250,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
}
for (i = 0; i < rss->num - 1; i++) {
for (i = 0; i < rss->queue_num - 1; i++) {
if (rss->queue[i + 1] != rss->queue[i] + 1) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -4265,8 +4265,8 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
for (n = 0; n < conf_info->queue_region_number; n++) {
if (conf_info->region[n].user_priority_num ||
conf_info->region[n].flowtype_num) {
if (!((rte_is_power_of_2(rss->num)) &&
rss->num <= 64)) {
if (!((rte_is_power_of_2(rss->queue_num)) &&
rss->queue_num <= 64)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@ -4294,7 +4294,8 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
for (i = 0; i < info->queue_region_number; i++) {
if (info->region[i].queue_num == rss->num &&
if (info->region[i].queue_num ==
rss->queue_num &&
info->region[i].queue_start_index ==
rss->queue[0])
break;
@ -4310,7 +4311,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
info->region[i].queue_num =
rss->num;
rss->queue_num;
info->region[i].queue_start_index =
rss->queue[0];
info->region[i].region_id =
@ -4356,7 +4357,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (rss_config->queue_region_conf)
return 0;
if (!rss || !rss->num) {
if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@ -4364,7 +4365,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
for (n = 0; n < rss->num; n++) {
for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -4375,15 +4376,19 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
/* Parse RSS related parameters from configuration */
if (rss->rss_conf)
rss_config->rss_conf = *rss->rss_conf;
else
rss_config->rss_conf.rss_hf =
pf->adapter->flow_types_mask;
if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS hash key too large");
if (rss->queue_num > RTE_DIM(rss_config->queue))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"too many queues for RSS context");
if (i40e_rss_conf_init(rss_config, rss))
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS context initialization failure");
for (n = 0; n < rss->num; ++n)
rss_config->queue[n] = rss->queue[n];
rss_config->num = rss->num;
index++;
/* check if the next not void action is END */
@ -4903,7 +4908,7 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
if (rss_info->num)
if (rss_info->conf.queue_num)
ret = i40e_config_rss_filter(pf, rss_info, FALSE);
return ret;
}

View File

@ -100,8 +100,6 @@
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
#define IXGBE_HKEY_MAX_INDEX 10
/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define IXGBE_INCVAL_10GB 0x66666666
@ -8371,7 +8369,7 @@ ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
if (filter_info->rss_info.num)
if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev,
&filter_info->rss_info, TRUE);
}

View File

@ -4,6 +4,9 @@
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
#include <stdint.h>
#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
@ -12,6 +15,7 @@
#ifdef RTE_LIBRTE_SECURITY
#include "ixgbe_ipsec.h"
#endif
#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
@ -39,6 +43,7 @@
#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
#define IXGBE_HKEY_MAX_INDEX 10
#define IXGBE_MAX_RX_QUEUE_NUM 128
#define IXGBE_MAX_INTR_QUEUE_NUM 15
#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
@ -196,8 +201,8 @@ struct ixgbe_hw_fdir_info {
};
/** Self-contained copy of an rte_flow RSS action for an ixgbe RSS filter. */
struct ixgbe_rte_flow_rss_conf {
struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
uint16_t num; /**< Number of entries in queue[]. */
struct rte_flow_action_rss conf; /**< RSS parameters; key/queue point into the arrays below. */
uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /**< Hash key storage (IXGBE_HKEY_MAX_INDEX = 10 words, 40 bytes). */
uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
};
@ -696,6 +701,10 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
const struct rte_flow_action_rss *with);
int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add);

View File

@ -2761,7 +2761,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
rss = (const struct rte_flow_action_rss *)act->conf;
if (!rss || !rss->num) {
if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@ -2769,7 +2769,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
for (n = 0; n < rss->num; n++) {
for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -2778,14 +2778,19 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
}
if (rss->rss_conf)
rss_conf->rss_conf = *rss->rss_conf;
else
rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
for (n = 0; n < rss->num; ++n)
rss_conf->queue[n] = rss->queue[n];
rss_conf->num = rss->num;
if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS hash key must be exactly 40 bytes");
if (rss->queue_num > RTE_DIM(rss_conf->queue))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"too many queues for RSS context");
if (ixgbe_rss_conf_init(rss_conf, rss))
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS context initialization failure");
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
@ -2834,7 +2839,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
if (filter_info->rss_info.num)
if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
@ -3153,9 +3158,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
rte_memcpy(&rss_filter_ptr->filter_info,
&rss_conf,
sizeof(struct ixgbe_rte_flow_rss_conf));
ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
&rss_conf.conf);
TAILQ_INSERT_TAIL(&filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;

View File

@ -5675,6 +5675,36 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
}
}
/**
 * Make a self-contained copy of an RSS action configuration.
 *
 * Scalar fields are copied as-is; the key and queue arrays are duplicated
 * into the storage embedded in @p out so the result no longer references
 * caller-owned memory.
 *
 * @param out
 *   Destination RSS configuration (embedded storage is overwritten).
 * @param in
 *   Source RSS action configuration.
 *
 * @return
 *   0 on success, -EINVAL if either array exceeds the embedded storage.
 */
int
ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
		    const struct rte_flow_action_rss *in)
{
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss){
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
		/*
		 * in->key (or in->queue) may be NULL when the corresponding
		 * length is 0; memcpy() with a NULL source is undefined
		 * behavior even for a zero length, so only copy when there
		 * is actual data.
		 */
		.key = in->key_len ?
			memcpy(out->key, in->key, in->key_len) : NULL,
		.queue = in->queue_num ?
			memcpy(out->queue, in->queue,
			       sizeof(*in->queue) * in->queue_num) : NULL,
	};
	return 0;
}
/**
 * Compare two RSS action configurations for equality.
 *
 * @param comp
 *   First RSS configuration (typically the stored one).
 * @param with
 *   Second RSS configuration to compare against.
 *
 * @return
 *   Nonzero when both describe the same types, key and queue set.
 */
int
ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
		      const struct rte_flow_action_rss *with)
{
	/*
	 * key/queue pointers may be NULL when the corresponding length is
	 * 0; memcmp() on a NULL pointer is undefined behavior even with a
	 * zero length, so skip the comparison for empty arrays (lengths
	 * are already known to match at that point).
	 */
	return (comp->types == with->types &&
		comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		(with->key_len == 0 ||
		 !memcmp(comp->key, with->key, with->key_len)) &&
		(with->queue_num == 0 ||
		 !memcmp(comp->queue, with->queue,
			 sizeof(*with->queue) * with->queue_num)));
}
int
ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add)
@ -5685,7 +5715,12 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
uint16_t j;
uint16_t sp_reta_size;
uint32_t reta_reg;
struct rte_eth_rss_conf rss_conf = conf->rss_conf;
struct rte_eth_rss_conf rss_conf = {
.rss_key = conf->conf.key_len ?
(void *)(uintptr_t)conf->conf.key : NULL,
.rss_key_len = conf->conf.key_len,
.rss_hf = conf->conf.types,
};
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@ -5695,8 +5730,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
if (!add) {
if (memcmp(conf, &filter_info->rss_info,
sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
&conf->conf)) {
ixgbe_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct ixgbe_rte_flow_rss_conf));
@ -5705,7 +5740,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
if (filter_info->rss_info.num)
if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table
* The byte-swap is needed because NIC registers are in
@ -5715,9 +5750,9 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
if (j == conf->num)
if (j == conf->conf.queue_num)
j = 0;
reta = (reta << 8) | conf->queue[j];
reta = (reta << 8) | conf->conf.queue[j];
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
@ -5734,8 +5769,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
ixgbe_hw_rss_hash_set(hw, &rss_conf);
rte_memcpy(&filter_info->rss_info,
conf, sizeof(struct ixgbe_rte_flow_rss_conf));
if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
return -EINVAL;
return 0;
}

View File

@ -569,7 +569,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
" for UDP RSS and inner VXLAN RSS");
/* Fake support for all possible RSS hash fields. */
priv->hw_rss_sup = ~UINT64_C(0);
priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1);
priv->hw_rss_sup = mlx4_conv_rss_types(priv, -1);
/* Filter out known unsupported fields. */
priv->hw_rss_sup &=
~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP |

View File

@ -76,22 +76,22 @@ struct mlx4_drop {
};
/**
* Convert DPDK RSS hash fields to their Verbs equivalent.
* Convert DPDK RSS hash types to their Verbs equivalent.
*
* This function returns the supported (default) set when @p rss_hf has
* This function returns the supported (default) set when @p types has
* special value (uint64_t)-1.
*
* @param priv
* Pointer to private structure.
* @param rss_hf
* Hash fields in DPDK format (see struct rte_eth_rss_conf).
* @param types
* Hash types in DPDK format (see struct rte_eth_rss_conf).
*
* @return
* A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
* otherwise and rte_errno is set.
*/
uint64_t
mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf)
mlx4_conv_rss_types(struct priv *priv, uint64_t types)
{
enum { IPV4, IPV6, TCP, UDP, };
const uint64_t in[] = {
@ -126,17 +126,17 @@ mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf)
unsigned int i;
for (i = 0; i != RTE_DIM(in); ++i)
if (rss_hf & in[i]) {
seen |= rss_hf & in[i];
if (types & in[i]) {
seen |= types & in[i];
conv |= out[i];
}
if ((conv & priv->hw_rss_sup) == conv) {
if (rss_hf == (uint64_t)-1) {
if (types == (uint64_t)-1) {
/* Include inner RSS by default if supported. */
conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER;
return conv;
}
if (!(rss_hf & ~seen))
if (!(types & ~seen))
return conv;
}
rte_errno = ENOTSUP;
@ -717,7 +717,8 @@ mlx4_flow_prepare(struct priv *priv,
switch (action->type) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_eth_rss_conf *rss_conf;
const uint8_t *rss_key;
uint32_t rss_key_len;
uint64_t fields;
unsigned int i;
@ -747,58 +748,56 @@ mlx4_flow_prepare(struct priv *priv,
break;
rss = action->conf;
/* Default RSS configuration if none is provided. */
rss_conf =
rss->rss_conf ?
rss->rss_conf :
&(struct rte_eth_rss_conf){
.rss_key = mlx4_rss_hash_key_default,
.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
.rss_hf = -1,
};
if (rss->key_len) {
rss_key = rss->key;
rss_key_len = rss->key_len;
} else {
rss_key = mlx4_rss_hash_key_default;
rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
}
/* Sanity checks. */
for (i = 0; i < rss->num; ++i)
for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
priv->dev->data->nb_rx_queues)
break;
if (i != rss->num) {
if (i != rss->queue_num) {
msg = "queue index target beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
}
if (!rte_is_power_of_2(rss->num)) {
if (!rte_is_power_of_2(rss->queue_num)) {
msg = "for RSS, mlx4 requires the number of"
" queues to be a power of two";
goto exit_action_not_supported;
}
if (rss_conf->rss_key_len !=
sizeof(flow->rss->key)) {
if (rss_key_len != sizeof(flow->rss->key)) {
msg = "mlx4 supports exactly one RSS hash key"
" length: "
MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
goto exit_action_not_supported;
}
for (i = 1; i < rss->num; ++i)
for (i = 1; i < rss->queue_num; ++i)
if (rss->queue[i] - rss->queue[i - 1] != 1)
break;
if (i != rss->num) {
if (i != rss->queue_num) {
msg = "mlx4 requires RSS contexts to use"
" consecutive queue indices only";
goto exit_action_not_supported;
}
if (rss->queue[0] % rss->num) {
if (rss->queue[0] % rss->queue_num) {
msg = "mlx4 requires the first queue of a RSS"
" context to be aligned on a multiple"
" of the context size";
goto exit_action_not_supported;
}
rte_errno = 0;
fields = mlx4_conv_rss_hf(priv, rss_conf->rss_hf);
fields = mlx4_conv_rss_types(priv, rss->types);
if (fields == (uint64_t)-1 && rte_errno) {
msg = "unsupported RSS hash type requested";
goto exit_action_not_supported;
}
flow->rss = mlx4_rss_get
(priv, fields, rss_conf->rss_key, rss->num,
(priv, fields, rss_key, rss->queue_num,
rss->queue);
if (!flow->rss) {
msg = "either invalid parameters or not enough"
@ -1284,8 +1283,10 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
uint16_t queue[queues];
struct rte_flow_action_rss action_rss = {
.rss_conf = NULL, /* Rely on default fallback settings. */
.num = queues,
.types = -1,
.key_len = MLX4_RSS_HASH_KEY_SIZE,
.queue_num = queues,
.key = mlx4_rss_hash_key_default,
.queue = queue,
};
struct rte_flow_action actions[] = {

View File

@ -47,7 +47,7 @@ struct rte_flow {
/* mlx4_flow.c */
uint64_t mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf);
uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t rss_hf);
int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,

View File

@ -88,7 +88,7 @@ mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
*/
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[])
{
struct mlx4_rss *rss;

View File

@ -127,7 +127,7 @@ uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
int mlx4_rss_init(struct priv *priv);
void mlx4_rss_deinit(struct priv *priv);
struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
int mlx4_rss_attach(struct mlx4_rss *rss);

View File

@ -214,9 +214,8 @@ struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
uint32_t mark:1; /**< Set if the flow is marked. */
uint32_t drop:1; /**< Drop queue. */
uint16_t queues_n; /**< Number of entries in queue[]. */
struct rte_flow_action_rss rss_conf; /**< RSS configuration */
uint16_t (*queues)[]; /**< Queues indexes to use. */
struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
@ -406,9 +405,8 @@ struct mlx5_flow_parse {
uint32_t mark:1; /**< Mark is present in the flow. */
uint32_t count:1; /**< Count is present in the flow. */
uint32_t mark_id; /**< Mark identifier. */
struct rte_flow_action_rss rss_conf; /**< RSS configuration */
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
uint16_t queues_n; /**< Number of entries in queue[]. */
struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
enum hash_rxq_type layer; /**< Last pattern layer detected. */
struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
@ -539,47 +537,6 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
return 0;
}
/**
* Copy the RSS configuration from the user ones, of the rss_conf is null,
* uses the driver one.
*
* @param parser
* Internal parser structure.
* @param rss_conf
* User RSS configuration to save.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
const struct rte_eth_rss_conf *rss_conf)
{
/*
* This function is also called at the beginning of
* mlx5_flow_convert_actions() to initialize the parser with the
* device default RSS configuration.
*/
if (rss_conf) {
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
rte_errno = EINVAL;
return -rte_errno;
}
if (rss_conf->rss_key_len != 40) {
rte_errno = EINVAL;
return -rte_errno;
}
if (rss_conf->rss_key_len && rss_conf->rss_key) {
parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
memcpy(parser->rss_key, rss_conf->rss_key,
rss_conf->rss_key_len);
parser->rss_conf.rss_key = parser->rss_key;
}
parser->rss_conf.rss_hf = rss_conf->rss_hf;
}
return 0;
}
/**
* Extract attribute to the parser.
*
@ -650,17 +607,7 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
enum { FATE = 1, MARK = 2, COUNT = 4, };
uint32_t overlap = 0;
struct priv *priv = dev->data->dev_private;
int ret;
/*
* Add default RSS configuration necessary for Verbs to create QP even
* if no RSS is necessary.
*/
ret = mlx5_flow_convert_rss_conf(parser,
(const struct rte_eth_rss_conf *)
&priv->rss_conf);
if (ret)
return ret;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
@ -679,25 +626,53 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
overlap |= FATE;
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
parser->queues_n = 1;
parser->queues[0] = queue->index;
parser->rss_conf = (struct rte_flow_action_rss){
.queue_num = 1,
.queue = parser->queues,
};
} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
const struct rte_flow_action_rss *rss =
(const struct rte_flow_action_rss *)
actions->conf;
const uint8_t *rss_key;
uint32_t rss_key_len;
uint16_t n;
if (overlap & FATE)
goto exit_action_overlap;
overlap |= FATE;
if (!rss || !rss->num) {
if (rss->types & MLX5_RSS_HF_MASK) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"unsupported RSS type"
" requested");
return -rte_errno;
}
if (rss->key_len) {
rss_key_len = rss->key_len;
rss_key = rss->key;
} else {
rss_key_len = rss_hash_default_key_len;
rss_key = rss_hash_default_key;
}
if (rss_key_len != RTE_DIM(parser->rss_key)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"RSS hash key must be"
" exactly 40 bytes long");
return -rte_errno;
}
if (!rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no valid queues");
return -rte_errno;
}
if (rss->num > RTE_DIM(parser->queues)) {
if (rss->queue_num > RTE_DIM(parser->queues)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
@ -705,7 +680,7 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
" context");
return -rte_errno;
}
for (n = 0; n < rss->num; ++n) {
for (n = 0; n < rss->queue_num; ++n) {
if (rss->queue[n] >= priv->rxqs_n) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -715,16 +690,16 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
return -rte_errno;
}
}
for (n = 0; n < rss->num; ++n)
parser->queues[n] = rss->queue[n];
parser->queues_n = rss->num;
if (mlx5_flow_convert_rss_conf(parser, rss->rss_conf)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"wrong RSS configuration");
return -rte_errno;
}
parser->rss_conf = (struct rte_flow_action_rss){
.types = rss->types,
.key_len = rss_key_len,
.queue_num = rss->queue_num,
.key = memcpy(parser->rss_key, rss_key,
sizeof(*rss_key) * rss_key_len),
.queue = memcpy(parser->queues, rss->queue,
sizeof(*rss->queue) *
rss->queue_num),
};
} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
@ -769,7 +744,7 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
parser->drop = 1;
if (parser->drop && parser->mark)
parser->mark = 0;
if (!parser->queues_n && !parser->drop) {
if (!parser->rss_conf.queue_num && !parser->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
return -rte_errno;
@ -951,7 +926,7 @@ mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser)
unsigned int i;
/* Remove any other flow not matching the pattern. */
if (parser->queues_n == 1 && !parser->rss_conf.rss_hf) {
if (parser->rss_conf.queue_num == 1 && !parser->rss_conf.types) {
for (i = 0; i != hash_rxq_init_n; ++i) {
if (i == HASH_RXQ_ETH)
continue;
@ -979,7 +954,7 @@ mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser)
}
/* Remove impossible flow according to the RSS configuration. */
if (hash_rxq_init[parser->layer].dpdk_rss_hf &
parser->rss_conf.rss_hf) {
parser->rss_conf.types) {
/* Remove any other flow. */
for (i = hmin; i != (hmax + 1); ++i) {
if ((i == parser->layer) ||
@ -990,7 +965,7 @@ mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser)
}
} else if (!parser->queue[ip].ibv_attr) {
/* no RSS possible with the current configuration. */
parser->queues_n = 1;
parser->rss_conf.queue_num = 1;
return;
}
fill:
@ -1119,7 +1094,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
for (i = 0; i != hash_rxq_init_n; ++i) {
unsigned int offset;
if (!(parser->rss_conf.rss_hf &
if (!(parser->rss_conf.types &
hash_rxq_init[i].dpdk_rss_hf) &&
(i != HASH_RXQ_ETH))
continue;
@ -1787,20 +1762,20 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
continue;
flow->frxq[i].hrxq =
mlx5_hrxq_get(dev,
parser->rss_conf.rss_key,
parser->rss_conf.rss_key_len,
parser->rss_conf.key,
parser->rss_conf.key_len,
hash_fields,
parser->queues,
parser->queues_n);
parser->rss_conf.queue,
parser->rss_conf.queue_num);
if (flow->frxq[i].hrxq)
continue;
flow->frxq[i].hrxq =
mlx5_hrxq_new(dev,
parser->rss_conf.rss_key,
parser->rss_conf.rss_key_len,
parser->rss_conf.key,
parser->rss_conf.key_len,
hash_fields,
parser->queues,
parser->queues_n);
parser->rss_conf.queue,
parser->rss_conf.queue_num);
if (!flow->frxq[i].hrxq) {
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
@ -1871,9 +1846,9 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
NULL, "internal error in flow creation");
goto error;
}
for (i = 0; i != parser->queues_n; ++i) {
for (i = 0; i != parser->rss_conf.queue_num; ++i) {
struct mlx5_rxq_data *q =
(*priv->rxqs)[parser->queues[i]];
(*priv->rxqs)[parser->rss_conf.queue[i]];
q->mark |= parser->mark;
}
@ -1937,7 +1912,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
if (ret)
goto exit;
flow = rte_calloc(__func__, 1,
sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
sizeof(*flow) +
parser.rss_conf.queue_num * sizeof(uint16_t),
0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
@ -1946,15 +1922,20 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
"cannot allocate flow memory");
return NULL;
}
/* Copy queues configuration. */
/* Copy configuration. */
flow->queues = (uint16_t (*)[])(flow + 1);
memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
flow->queues_n = parser.queues_n;
flow->rss_conf = (struct rte_flow_action_rss){
.types = parser.rss_conf.types,
.key_len = parser.rss_conf.key_len,
.queue_num = parser.rss_conf.queue_num,
.key = memcpy(flow->rss_key, parser.rss_conf.key,
sizeof(*parser.rss_conf.key) *
parser.rss_conf.key_len),
.queue = memcpy(flow->queues, parser.rss_conf.queue,
sizeof(*parser.rss_conf.queue) *
parser.rss_conf.queue_num),
};
flow->mark = parser.mark;
/* Copy RSS configuration. */
flow->rss_conf = parser.rss_conf;
flow->rss_conf.rss_key = flow->rss_key;
memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
/* finalise the flow. */
if (parser.drop)
ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
@ -2034,7 +2015,7 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
if (flow->drop || !flow->mark)
goto free;
for (i = 0; i != flow->queues_n; ++i) {
for (i = 0; i != flow->rss_conf.queue_num; ++i) {
struct rte_flow *tmp;
int mark = 0;
@ -2344,19 +2325,19 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
if (!flow->frxq[i].ibv_attr)
continue;
flow->frxq[i].hrxq =
mlx5_hrxq_get(dev, flow->rss_conf.rss_key,
flow->rss_conf.rss_key_len,
mlx5_hrxq_get(dev, flow->rss_conf.key,
flow->rss_conf.key_len,
hash_rxq_init[i].hash_fields,
(*flow->queues),
flow->queues_n);
flow->rss_conf.queue,
flow->rss_conf.queue_num);
if (flow->frxq[i].hrxq)
goto flow_create;
flow->frxq[i].hrxq =
mlx5_hrxq_new(dev, flow->rss_conf.rss_key,
flow->rss_conf.rss_key_len,
mlx5_hrxq_new(dev, flow->rss_conf.key,
flow->rss_conf.key_len,
hash_rxq_init[i].hash_fields,
(*flow->queues),
flow->queues_n);
flow->rss_conf.queue,
flow->rss_conf.queue_num);
if (!flow->frxq[i].hrxq) {
DRV_LOG(DEBUG,
"port %u flow %p cannot be applied",
@ -2380,8 +2361,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
}
if (!flow->mark)
continue;
for (i = 0; i != flow->queues_n; ++i)
(*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
for (i = 0; i != flow->rss_conf.queue_num; ++i)
(*priv->rxqs)[flow->rss_conf.queue[i]]->mark = 1;
}
return 0;
}
@ -2458,8 +2439,10 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
};
uint16_t queue[priv->reta_idx_n];
struct rte_flow_action_rss action_rss = {
.rss_conf = &priv->rss_conf,
.num = priv->reta_idx_n,
.types = priv->rss_conf.rss_hf,
.key_len = priv->rss_conf.rss_key_len,
.queue_num = priv->reta_idx_n,
.key = priv->rss_conf.rss_key,
.queue = queue,
};
struct rte_flow_action actions[] = {

View File

@ -1218,8 +1218,8 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
uint16_t queues_n)
mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
@ -1286,8 +1286,8 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
* An indirection table if found.
*/
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
uint16_t queues_n)
mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
@ -1391,8 +1391,10 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
@ -1408,6 +1410,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
rte_errno = ENOMEM;
return NULL;
}
if (!rss_key_len) {
rss_key_len = rss_hash_default_key_len;
rss_key = rss_hash_default_key;
}
qp = mlx5_glue->create_qp_ex
(priv->ctx,
&(struct ibv_qp_init_attr_ex){
@ -1419,7 +1425,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_key_len,
.rx_hash_key = rss_key,
.rx_hash_key = (void *)(uintptr_t)rss_key,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
@ -1469,8 +1475,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
* An hash Rx queue on success.
*/
struct mlx5_hrxq *
mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;

View File

@ -134,7 +134,7 @@ struct mlx5_ind_table_ibv {
LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
uint16_t queues_n; /**< Number of queues in the list. */
uint32_t queues_n; /**< Number of queues in the list. */
uint16_t queues[]; /**< Queue list. */
};
@ -145,7 +145,7 @@ struct mlx5_hrxq {
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
uint64_t hash_fields; /* Verbs Hash fields. */
uint8_t rss_key_len; /* Hash key length in bytes. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};
@ -238,20 +238,22 @@ int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
uint16_t queues[],
uint16_t queues_n);
const uint16_t *queues,
uint32_t queues_n);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
uint16_t queues[],
uint16_t queues_n);
const uint16_t *queues,
uint32_t queues_n);
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key,
uint8_t rss_key_len, uint64_t hash_fields,
uint16_t queues[], uint16_t queues_n);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key,
uint8_t rss_key_len, uint64_t hash_fields,
uint16_t queues[], uint16_t queues_n);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);

View File

@ -1234,13 +1234,11 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
struct sfc_rxq *rxq;
unsigned int rxq_hw_index_min;
unsigned int rxq_hw_index_max;
const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
uint64_t rss_hf;
uint8_t *rss_key = NULL;
const uint8_t *rss_key;
struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
unsigned int i;
if (rss->num == 0)
if (rss->queue_num == 0)
return -EINVAL;
rxq_sw_index = sa->rxq_count - 1;
@ -1248,7 +1246,7 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
rxq_hw_index_min = rxq->hw_index;
rxq_hw_index_max = 0;
for (i = 0; i < rss->num; ++i) {
for (i = 0; i < rss->queue_num; ++i) {
rxq_sw_index = rss->queue[i];
if (rxq_sw_index >= sa->rxq_count)
@ -1263,15 +1261,14 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
rxq_hw_index_max = rxq->hw_index;
}
rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
if ((rss->types & ~SFC_RSS_OFFLOADS) != 0)
return -EINVAL;
if (rss_conf != NULL) {
if (rss_conf->rss_key_len != sizeof(sa->rss_key))
if (rss->key_len) {
if (rss->key_len != sizeof(sa->rss_key))
return -EINVAL;
rss_key = rss_conf->rss_key;
rss_key = rss->key;
} else {
rss_key = sa->rss_key;
}
@ -1280,11 +1277,11 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss->types);
rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
unsigned int rxq_sw_index = rss->queue[i % rss->num];
unsigned int rxq_sw_index = rss->queue[i % rss->queue_num];
struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;

View File

@ -1215,7 +1215,7 @@ priv_flow_process(struct pmd_internals *pmd,
if (err)
goto exit_action_not_supported;
}
if (flow && rss)
if (flow)
err = rss_add_actions(flow, pmd, rss, error);
} else {
goto exit_action_not_supported;
@ -2050,7 +2050,7 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
struct rte_flow_error *error)
{
/* 4096 is the maximum number of instructions for a BPF program */
int i;
unsigned int i;
int err;
struct rss_key rss_entry = { .hash_fields = 0,
.key_size = 0 };
@ -2066,8 +2066,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
}
/* Update RSS map entry with queues */
rss_entry.nb_queues = rss->num;
for (i = 0; i < rss->num; i++)
rss_entry.nb_queues = rss->queue_num;
for (i = 0; i < rss->queue_num; i++)
rss_entry.queues[i] = rss->queue[i];
rss_entry.hash_fields =
(1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);

View File

@ -203,9 +203,13 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
i < eth_dev->data->nb_rx_queues; ++i)
if (eth_dev->data->rx_queues[i])
queue[j++] = i;
action_rss.rss_conf = &rss_conf;
action_rss.num = j;
action_rss.queue = queue;
action_rss = (struct rte_flow_action_rss){
.types = rss_conf.rss_hf,
.key_len = rss_conf.rss_key_len,
.queue_num = j,
.key = rss_key,
.queue = queue,
};
ret = rte_flow_validate(sa->portid, &sa->attr,
sa->pattern, sa->action,
&err);

View File

@ -330,42 +330,29 @@ flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
off = 0;
if (dst.rss)
*dst.rss = (struct rte_flow_action_rss){
.num = src.rss->num,
.types = src.rss->types,
.key_len = src.rss->key_len,
.queue_num = src.rss->queue_num,
};
off += sizeof(*src.rss);
if (src.rss->num) {
if (src.rss->key_len) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
size = sizeof(*src.rss->queue) * src.rss->num;
size = sizeof(*src.rss->key) * src.rss->key_len;
if (dst.rss)
dst.rss->key = memcpy
((void *)((uintptr_t)dst.rss + off),
src.rss->key, size);
off += size;
}
if (src.rss->queue_num) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
size = sizeof(*src.rss->queue) * src.rss->queue_num;
if (dst.rss)
dst.rss->queue = memcpy
((void *)((uintptr_t)dst.rss + off),
src.rss->queue, size);
off += size;
}
off = RTE_ALIGN_CEIL(off, sizeof(double));
if (dst.rss) {
dst.rss->rss_conf = (void *)((uintptr_t)dst.rss + off);
*(struct rte_eth_rss_conf *)(uintptr_t)
dst.rss->rss_conf = (struct rte_eth_rss_conf){
.rss_key_len = src.rss->rss_conf->rss_key_len,
.rss_hf = src.rss->rss_conf->rss_hf,
};
}
off += sizeof(*src.rss->rss_conf);
if (src.rss->rss_conf->rss_key_len) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
size = sizeof(*src.rss->rss_conf->rss_key) *
src.rss->rss_conf->rss_key_len;
if (dst.rss) {
((struct rte_eth_rss_conf *)(uintptr_t)
dst.rss->rss_conf)->rss_key =
(void *)((uintptr_t)dst.rss + off);
memcpy(dst.rss->rss_conf->rss_key,
src.rss->rss_conf->rss_key,
size);
}
off += size;
}
size = off;
break;
default:

View File

@ -1033,13 +1033,21 @@ struct rte_flow_query_count {
* Similar to QUEUE, except RSS is additionally performed on packets to
* spread them among several queues according to the provided parameters.
*
* Unlike global RSS settings used by other DPDK APIs, unsetting the
* @p types field does not disable RSS in a flow rule. Doing so instead
* requests safe unspecified "best-effort" settings from the underlying PMD,
* which depending on the flow rule, may result in anything ranging from
* empty (single queue) to all-inclusive RSS.
*
* Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
* hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
* both can be requested simultaneously.
*/
struct rte_flow_action_rss {
const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
uint16_t num; /**< Number of entries in @p queue. */
uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
uint32_t key_len; /**< Hash key length in bytes. */
uint32_t queue_num; /**< Number of entries in @p queue. */
const uint8_t *key; /**< Hash key. */
const uint16_t *queue; /**< Queue indices to use. */
};