examples/vmdq: fix RSS configuration

To ensure that all queues of each pool can receive packets,
add an enable-rss argument that changes the RSS configuration.

Fixes: 6bb97df521 ("examples/vmdq: new app")
Cc: stable@dpdk.org

Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
Acked-by: Xiaoyun Li <xiaoyun.li@intel.com>
Tested-by: Yingya Han <yingyax.han@intel.com>
This commit is contained in:
Junyu Jiang 2020-03-25 06:32:48 +00:00 committed by Ferruh Yigit
parent 9a82259d5d
commit 8f5b4af736
2 changed files with 37 additions and 8 deletions

View File

@ -26,13 +26,13 @@ The Intel® 82599 10 Gigabit Ethernet Controller NIC also supports the splitting
While the Intel® X710 or XL710 Ethernet Controller NICs support many configurations of VMDq pools of 4 or 8 queues each.
And queues numbers for each VMDq pool can be changed by setting CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
in config/common_* file.
The nb-pools parameter can be passed on the command line, after the EAL parameters:
The nb-pools and enable-rss parameters can be passed on the command line, after the EAL parameters:
.. code-block:: console
./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP --enable-rss
where, NP can be 8, 16 or 32.
where, NP can be 8, 16 or 32, and RSS is disabled by default.
In Linux* user space, the application can display statistics with the number of packets received on each queue.
To have the application display the statistics, send a SIGHUP signal to the running application process.

View File

@ -59,6 +59,7 @@ static uint32_t enabled_port_mask;
/* number of pools (if user does not specify any, 8 by default) */
static uint32_t num_queues = 8;
static uint32_t num_pools = 8;
static uint8_t rss_enable;
/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
@ -143,6 +144,13 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
if (rss_enable) {
eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
ETH_RSS_UDP |
ETH_RSS_TCP |
ETH_RSS_SCTP;
}
return 0;
}
@ -164,6 +172,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
uint16_t q;
uint16_t queues_per_pool;
uint32_t max_nb_pools;
uint64_t rss_hf_tmp;
/*
* The max pool number from dev_info will be used to validate the pool
@ -209,6 +218,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
if (!rte_eth_dev_is_valid_port(port))
return -1;
rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
printf("Port %u modified RSS hash function based on hardware support,"
"requested:%#"PRIx64" configured:%#"PRIx64"\n",
port,
rss_hf_tmp,
port_conf.rx_adv_conf.rss_conf.rss_hf);
}
/*
* Though in this example, we only receive packets from the first queue
* of each pool and send packets through first rte_lcore_count() tx
@ -363,7 +383,8 @@ static void
/*
 * Print command-line usage for the application.
 *
 * @param prgname
 *   argv[0], the program name shown at the start of the usage line.
 *
 * Fix: the original usage string printed "-p PORTMASK]" with a stray,
 * unbalanced ']' after PORTMASK; it is removed here.
 */
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	       " --nb-pools NP: number of pools\n"
	       " --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}
@ -377,6 +398,7 @@ vmdq_parse_args(int argc, char **argv)
const char *prgname = argv[0];
static struct option long_option[] = {
{"nb-pools", required_argument, NULL, 0},
{"enable-rss", 0, NULL, 0},
{NULL, 0, 0, 0}
};
@ -394,11 +416,18 @@ vmdq_parse_args(int argc, char **argv)
}
break;
case 0:
if (vmdq_parse_num_pools(optarg) == -1) {
printf("invalid number of pools\n");
vmdq_usage(prgname);
return -1;
if (!strcmp(long_option[option_index].name,
"nb-pools")) {
if (vmdq_parse_num_pools(optarg) == -1) {
printf("invalid number of pools\n");
vmdq_usage(prgname);
return -1;
}
}
if (!strcmp(long_option[option_index].name,
"enable-rss"))
rss_enable = 1;
break;
default: