app/testpmd: add option to fix multi-mempool check

Add a new testpmd command-line argument, multi-rx-mempool,
to control the multi-rx-mempool feature. It is disabled by default.

Also, validate the ethdev parameter 'max_rx_mempools' to determine
whether the device supports the multi-mempool feature.

Bugzilla ID: 1128
Fixes: 4f04edcda7 ("app/testpmd: support multiple mbuf pools per Rx queue")

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@amd.com>
Tested-by: Yingya Han <yingyax.han@intel.com>
Tested-by: Yaqi Tang <yaqi.tang@intel.com>
Authored by Hanumanth Pothula on 2022-11-21 23:37:56 +05:30; committed by Ferruh Yigit
parent c469b892a3
commit a4bf542111
4 changed files with 54 additions and 26 deletions
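For context (not part of the commit), a typical invocation exercising the new option pairs it with a multi-valued --mbuf-size, since each extra size creates an extra mempool; the device address and sizes below are illustrative assumptions:

    dpdk-testpmd -a 0000:03:00.0 -- --mbuf-size=2048,4096 --multi-rx-mempool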

--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c

@@ -88,7 +88,8 @@ usage(char* progname)
         	       "in NUMA mode.\n");
         printf("  --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
                "N bytes. If multiple numbers are specified the extra pools "
-              "will be created to receive with packet split features\n");
+              "will be created to receive packets based on the features "
+              "supported, like packet split, multi-rx-mempool.\n");
         printf("  --total-num-mbufs=N: set the number of mbufs to be allocated "
                "in mbuf pools.\n");
         printf("  --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
@@ -155,6 +156,7 @@ usage(char* progname)
         printf("  --rxhdrs=eth[,ipv4]*: set RX segment protocol to split.\n");
         printf("  --txpkts=X[,Y]*: set TX segment sizes"
                " or total packet length.\n");
+        printf("  --multi-rx-mempool: enable multi-rx-mempool support\n");
         printf("  --txonly-multi-flow: generate multiple flows in txonly mode\n");
         printf("  --tx-ip=src,dst: IP addresses in Tx-only mode\n");
         printf("  --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
@@ -669,6 +671,7 @@ launch_args_parse(int argc, char** argv)
         { "rxpkts",                     1, 0, 0 },
         { "rxhdrs",                     1, 0, 0 },
         { "txpkts",                     1, 0, 0 },
+        { "multi-rx-mempool",           0, 0, 0 },
         { "txonly-multi-flow",          0, 0, 0 },
         { "rxq-share",                  2, 0, 0 },
         { "eth-link-speed",             1, 0, 0 },
@@ -1295,6 +1298,8 @@ launch_args_parse(int argc, char** argv)
                 else
                         rte_exit(EXIT_FAILURE, "bad txpkts\n");
         }
+        if (!strcmp(lgopts[opt_idx].name, "multi-rx-mempool"))
+                multi_rx_mempool = 1;
         if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow"))
                 txonly_multi_flow = 1;
         if (!strcmp(lgopts[opt_idx].name, "rxq-share")) {

--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c

@@ -249,6 +249,8 @@ uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
 uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT];
 
+uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */
+
 /*
  * Configuration of packet segments used by the "txonly" processing engine.
  */
@@ -2659,24 +2661,9 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
         uint32_t prev_hdrs = 0;
         int ret;
 
-        /* Verify Rx queue configuration is single pool and segment or
-         * multiple pool/segment.
-         * @see rte_eth_rxconf::rx_mempools
-         * @see rte_eth_rxconf::rx_seg
-         */
-        if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
-            ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
-                /* Single pool/segment configuration */
-                rx_conf->rx_seg = NULL;
-                rx_conf->rx_nseg = 0;
-                ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
-                                             nb_rx_desc, socket_id,
-                                             rx_conf, mp);
-                goto exit;
-        }
-
-        if (rx_pkt_nb_segs > 1 ||
-            rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+        if ((rx_pkt_nb_segs > 1) &&
+            (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
                 /* multi-segment configuration */
                 for (i = 0; i < rx_pkt_nb_segs; i++) {
                         struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
@@ -2701,22 +2688,52 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                 }
                 rx_conf->rx_nseg = rx_pkt_nb_segs;
                 rx_conf->rx_seg = rx_useg;
-        } else {
+                rx_conf->rx_mempools = NULL;
+                rx_conf->rx_nmempool = 0;
+                ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+                                    socket_id, rx_conf, NULL);
+                rx_conf->rx_seg = NULL;
+                rx_conf->rx_nseg = 0;
+        } else if (multi_rx_mempool == 1) {
                 /* multi-pool configuration */
+                struct rte_eth_dev_info dev_info;
+
+                if (mbuf_data_size_n <= 1) {
+                        fprintf(stderr, "Invalid number of mempools %u\n",
+                                mbuf_data_size_n);
+                        return -EINVAL;
+                }
+
+                ret = rte_eth_dev_info_get(port_id, &dev_info);
+                if (ret != 0)
+                        return ret;
+                if (dev_info.max_rx_mempools == 0) {
+                        fprintf(stderr,
+                                "Port %u doesn't support requested multi-rx-mempool configuration.\n",
+                                port_id);
+                        return -ENOTSUP;
+                }
                 for (i = 0; i < mbuf_data_size_n; i++) {
                         mpx = mbuf_pool_find(socket_id, i);
                         rx_mempool[i] = mpx ? mpx : mp;
                 }
                 rx_conf->rx_mempools = rx_mempool;
                 rx_conf->rx_nmempool = mbuf_data_size_n;
-        }
-        ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+                ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
                                     socket_id, rx_conf, NULL);
-        rx_conf->rx_seg = NULL;
-        rx_conf->rx_nseg = 0;
-        rx_conf->rx_mempools = NULL;
-        rx_conf->rx_nmempool = 0;
-exit:
+                rx_conf->rx_mempools = NULL;
+                rx_conf->rx_nmempool = 0;
+        } else {
+                /* Single pool/segment configuration */
+                rx_conf->rx_seg = NULL;
+                rx_conf->rx_nseg = 0;
+                rx_conf->rx_mempools = NULL;
+                rx_conf->rx_nmempool = 0;
+                ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+                                    socket_id, rx_conf, mp);
+        }
+
         ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
                                                 RTE_ETH_QUEUE_STATE_STOPPED :
                                                 RTE_ETH_QUEUE_STATE_STARTED;
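To make the ethdev contract used above concrete, here is a minimal sketch (not part of the patch) of the same validate-then-setup pattern from an application's point of view; the function name, descriptor count, and NUMA-node lookup are illustrative assumptions:

#include <errno.h>
#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Validate-then-setup, mirroring the check in the hunk above: refuse a
 * multi-mempool Rx queue when the device reports max_rx_mempools == 0.
 */
static int
setup_multi_mempool_rxq(uint16_t port_id, uint16_t queue_id,
                        struct rte_mempool **pools, uint16_t nb_pools)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rx_conf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* max_rx_mempools is the per-queue limit; 0 means unsupported. */
        if (dev_info.max_rx_mempools < nb_pools)
                return -ENOTSUP;

        rx_conf = dev_info.default_rxconf;
        rx_conf.rx_mempools = pools;    /* Rx buffers come from these pools */
        rx_conf.rx_nmempool = nb_pools; /* number of entries in the array */

        /* The final mp argument is NULL: pools are taken from rx_conf. */
        return rte_eth_rx_queue_setup(port_id, queue_id, 512 /* descriptors */,
                                      rte_eth_dev_socket_id(port_id),
                                      &rx_conf, NULL);
}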

--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h

@@ -592,6 +592,8 @@ extern uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
 extern uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
 extern uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
 
+extern uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature. */
+
 /*
  * Configuration of packet segments used by the "txonly" processing engine.
  */

--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst

@@ -365,6 +365,10 @@ The command line options are:
     Set TX segment sizes or total packet length. Valid for ``tx-only``
     and ``flowgen`` forwarding modes.
 
+*   ``--multi-rx-mempool``
+
+    Enable multiple mbuf pools per Rx queue.
+
 *   ``--txonly-multi-flow``
 
     Generate multiple flows in txonly mode.