app/testpmd: fix number of mbufs in pool

The number of mbufs in the pools is not consistent: it depends on the
options passed by the user and on the number of ports, especially in
NUMA mode when the number of mbufs is specified by the user.

When the user specifies the number of mbufs (per pool), it should
override the default value.
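
Concretely, the per-pool sizing should follow this logic (a simplified
sketch of the new init_config() code; it matches the diff below, with
the surrounding code omitted):

  if (param_total_num_mbufs) {
          /* user value is used as-is for each per-socket pool */
          nb_mbuf_per_pool = param_total_num_mbufs;
  } else {
          /* default: derive the pool size from the max descriptor
           * counts, the per-lcore caches and one burst, then scale
           * by the maximum number of ports */
          nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
                  (nb_lcores * mb_mempool_cache) +
                  RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
          nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
  }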

- before the patch

./build/app/testpmd -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd -- --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=256000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=256000, size=2176, socket=1
  # BAD, should be n=8000 for each socket (see note below)

./build/app/testpmd -- --no-numa -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd -- --no-numa --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=128000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=128000, size=2176, socket=1
  # BAD, should be n=8000 for each socket (see note below)

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- --no-numa -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- --no-numa \
     --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0
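
Note: the BAD values above come from the old code, which divided the
user-supplied total by the number of ports and then multiplied it by
RTE_MAX_ETHPORTS when creating each per-socket pool:

  nb_mbuf = (param_total_num_mbufs / nb_ports) * RTE_MAX_ETHPORTS

Assuming the default RTE_MAX_ETHPORTS=32 and the port counts implied
by these runs (one port without vdevs, two with the null vdevs):

  8000 / 1 * 32 = 256000    # first BAD case
  8000 / 2 * 32 = 128000    # second BAD case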

- after the patch

./build/app/testpmd -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd -- --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=8000, size=2176, socket=1

./build/app/testpmd -- --no-numa -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd -- --no-numa --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=8000, size=2176, socket=1

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- --no-numa -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- --no-numa \
     --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0

Fixes: b6ea6408fbc7 ("ethdev: store numa_node per device")
Cc: stable@dpdk.org

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -543,34 +543,6 @@ init_config(void)
 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
 	}
 
-	/*
-	 * Create pools of mbuf.
-	 * If NUMA support is disabled, create a single pool of mbuf in
-	 * socket 0 memory by default.
-	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
-	 *
-	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
-	 * nb_txd can be configured at run time.
-	 */
-	if (param_total_num_mbufs)
-		nb_mbuf_per_pool = param_total_num_mbufs;
-	else {
-		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
-				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
-
-		if (!numa_support)
-			nb_mbuf_per_pool =
-				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
-	}
-
-	if (!numa_support) {
-		if (socket_num == UMA_NO_CONFIG)
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
-		else
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
-						 socket_num);
-	}
-
 	RTE_ETH_FOREACH_DEV(pid) {
 		port = &ports[pid];
 		rte_eth_dev_info_get(pid, &port->dev_info);
@@ -593,20 +565,37 @@ init_config(void)
 		port->need_reconfig_queues = 1;
 	}
 
+	/*
+	 * Create pools of mbuf.
+	 * If NUMA support is disabled, create a single pool of mbuf in
+	 * socket 0 memory by default.
+	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
+	 *
+	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
+	 * nb_txd can be configured at run time.
+	 */
+	if (param_total_num_mbufs)
+		nb_mbuf_per_pool = param_total_num_mbufs;
+	else {
+		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
+			(nb_lcores * mb_mempool_cache) +
+			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
+		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
+	}
+
 	if (numa_support) {
 		uint8_t i;
-		unsigned int nb_mbuf;
 
-		if (param_total_num_mbufs && nb_ports != 0)
-			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
-		for (i = 0; i < max_socket; i++) {
-			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
-			if (nb_mbuf)
-				mbuf_pool_create(mbuf_data_size,
-						nb_mbuf,i);
-		}
+		for (i = 0; i < max_socket; i++)
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
+	} else {
+		if (socket_num == UMA_NO_CONFIG)
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+		else
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
+					 socket_num);
 	}
 
 	init_port_config();
 
 	/*