app/testpmd: support multiple mbuf pools per Rx queue

Some hardware can choose a memory pool based on the packet's size: this
capability allows the PMD/NIC to pick, for each received packet, a memory
pool that matches the packet's length.

When multiple mempool support is enabled, populate the mempool array
accordingly. Also, print the name of the pool from which each received
packet's mbuf was allocated.

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Authored by Hanumanth Pothula on 2022-11-10 15:46:31 +05:30, committed by Andrew Rybchenko
commit 4f04edcda7 (parent 54a0f4d756)
3 changed files with 51 additions and 26 deletions
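For context: the patch builds on the per-queue multiple mempool API in ethdev
(rte_eth_rxconf::rx_mempools / rx_nmempool). Below is a minimal sketch,
assuming a PMD that advertises the capability, of how an application could
hand two pools of different data-room sizes to one Rx queue; the pool names,
sizes and the helper function are illustrative only.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative helper: give one Rx queue two pools of different sizes. */
static int
setup_rxq_with_two_pools(uint16_t port_id, uint16_t queue_id,
			 uint16_t nb_desc, unsigned int socket_id)
{
	struct rte_mempool *pools[2];
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rx_conf;
	int ret;

	/* One pool sized for small packets, one for large packets. */
	pools[0] = rte_pktmbuf_pool_create("pool_small", 8192, 256, 0,
					   512 + RTE_PKTMBUF_HEADROOM,
					   socket_id);
	pools[1] = rte_pktmbuf_pool_create("pool_large", 4096, 256, 0,
					   9000 + RTE_PKTMBUF_HEADROOM,
					   socket_id);
	if (pools[0] == NULL || pools[1] == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rx_conf = dev_info.default_rxconf;
	rx_conf.rx_mempools = pools;		/* candidate pools */
	rx_conf.rx_nmempool = RTE_DIM(pools);	/* number of pools */

	/* mb_pool is NULL: the pools are carried in rx_conf instead. */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      socket_id, &rx_conf, NULL);
}

Whether short packets actually land in the small pool is up to the driver;
ethdev only passes the candidate pools through to the PMD.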

app/test-pmd/testpmd.c

@@ -2653,12 +2653,20 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
struct rte_mempool *mpx;
unsigned int i, mp_n;
uint32_t prev_hdrs = 0;
int ret;
if (rx_pkt_nb_segs <= 1 ||
(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
/* Verify Rx queue configuration is single pool and segment or
* multiple pool/segment.
* @see rte_eth_rxconf::rx_mempools
* @see rte_eth_rxconf::rx_seg
*/
if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
/* Single pool/segment configuration */
rx_conf->rx_seg = NULL;
rx_conf->rx_nseg = 0;
ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
@@ -2666,9 +2674,12 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
rx_conf, mp);
goto exit;
}
if (rx_pkt_nb_segs > 1 ||
rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
/* multi-segment configuration */
for (i = 0; i < rx_pkt_nb_segs; i++) {
struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
struct rte_mempool *mpx;
/*
* Use last valid pool for the segments with number
* exceeding the pool index.
@@ -2690,10 +2701,21 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
}
rx_conf->rx_nseg = rx_pkt_nb_segs;
rx_conf->rx_seg = rx_useg;
} else {
/* multi-pool configuration */
for (i = 0; i < mbuf_data_size_n; i++) {
mpx = mbuf_pool_find(socket_id, i);
rx_mempool[i] = mpx ? mpx : mp;
}
rx_conf->rx_mempools = rx_mempool;
rx_conf->rx_nmempool = mbuf_data_size_n;
}
ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
socket_id, rx_conf, NULL);
rx_conf->rx_seg = NULL;
rx_conf->rx_nseg = 0;
rx_conf->rx_mempools = NULL;
rx_conf->rx_nmempool = 0;
exit:
ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
RTE_ETH_QUEUE_STATE_STOPPED :

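Condensed, the resulting flow in rx_queue_setup() is: single pool and single
segment keep the classic setup with mp; a buffer split request fills
rx_conf->rx_seg; otherwise one candidate pool per configured mbuf size fills
rx_conf->rx_mempools. A simplified sketch of that branch selection, reusing
testpmd symbols (mbuf_data_size_n, rx_pkt_nb_segs, mbuf_pool_find(),
MAX_MEMPOOL) purely for illustration, with the split branch elided:

#include <rte_ethdev.h>
#include "testpmd.h"	/* testpmd globals and mbuf_pool_find() */

/* Simplified illustration of the branch selection, not the exact code. */
static int
rxq_setup_sketch(uint16_t port_id, uint16_t qid, uint16_t nb_desc,
		 unsigned int socket_id, struct rte_eth_rxconf *rx_conf,
		 struct rte_mempool *mp)
{
	struct rte_mempool *pools[MAX_MEMPOOL] = {};
	unsigned int i;
	int ret;

	if (mbuf_data_size_n <= 1 && rx_pkt_nb_segs <= 1 &&
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		/* Single pool, single segment: pass mp directly. */
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		return rte_eth_rx_queue_setup(port_id, qid, nb_desc,
					      socket_id, rx_conf, mp);
	}

	if (rx_pkt_nb_segs > 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0) {
		/* Buffer split: describe segments in rx_conf->rx_seg. */
		/* ... fill rx_conf->rx_seg and rx_conf->rx_nseg ... */
	} else {
		/* Multiple pools: one candidate pool per mbuf size. */
		for (i = 0; i < mbuf_data_size_n; i++) {
			struct rte_mempool *mpx = mbuf_pool_find(socket_id, i);

			pools[i] = mpx != NULL ? mpx : mp;
		}
		rx_conf->rx_mempools = pools;
		rx_conf->rx_nmempool = mbuf_data_size_n;
	}

	ret = rte_eth_rx_queue_setup(port_id, qid, nb_desc,
				     socket_id, rx_conf, NULL);
	/* Do not leave rx_conf pointing at the stack-local array. */
	rx_conf->rx_mempools = NULL;
	rx_conf->rx_nmempool = 0;
	return ret;
}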
app/test-pmd/testpmd.h

@@ -80,6 +80,9 @@ extern uint8_t cl_quit;
#define MIN_TOTAL_NUM_MBUFS 1024
/* Maximum number of pools supported per Rx queue */
#define MAX_MEMPOOL 8
typedef uint8_t lcoreid_t;
typedef uint16_t portid_t;
typedef uint16_t queueid_t;

app/test-pmd/util.c

@@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
print_ether_addr(" - dst=", &eth_hdr->dst_addr,
print_buf, buf_size, &cur_len);
MKDUMPSTR(print_buf, buf_size, cur_len,
" - type=0x%04x - length=%u - nb_segs=%d",
eth_type, (unsigned int) mb->pkt_len,
" - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
(int)mb->nb_segs);
ol_flags = mb->ol_flags;
if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
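The pool name printed above comes straight from the mbuf: every rte_mbuf
carries a pointer to the mempool it was allocated from, so any Rx path can
report which pool served a packet. A small sketch of the same idea outside
testpmd (burst size and cleanup are illustrative):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Print the originating pool for each packet in one Rx burst. */
static void
print_rx_pools(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb_rx;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb_rx; i++) {
		printf("pkt %u: pool=%s len=%u\n",
		       i, pkts[i]->pool->name,
		       (unsigned int)pkts[i]->pkt_len);
		rte_pktmbuf_free(pkts[i]);
	}
}

With this patch the same information appears in testpmd's verbose per-packet
dump, which makes it easy to check that small and large packets are drawn
from the expected pools.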