net/mlx5: avoid reusing old queue's mbuf on reconfigure

This patch prepares the merge of the fake mbuf allocation needed by the
vector code with rxq_alloc_elts(), where all mbufs of the queue should be
allocated.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
This commit is contained in:
Nélio Laranjeiro 2017-08-23 10:15:05 +02:00 committed by Ferruh Yigit
parent 1d5d4ab903
commit 5e8494c3ae

View File

@ -666,16 +666,12 @@ rxq_trim_elts(struct rxq *rxq)
* Pointer to RX queue structure. * Pointer to RX queue structure.
* @param elts_n * @param elts_n
* Number of elements to allocate. * Number of elements to allocate.
* @param[in] pool
* If not NULL, fetch buffers from this array instead of allocating them
* with rte_pktmbuf_alloc().
* *
* @return * @return
* 0 on success, errno value on failure. * 0 on success, errno value on failure.
*/ */
static int static int
rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n, rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
struct rte_mbuf *(*pool)[])
{ {
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int i; unsigned int i;
@ -687,12 +683,7 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
volatile struct mlx5_wqe_data_seg *scat = volatile struct mlx5_wqe_data_seg *scat =
&(*rxq_ctrl->rxq.wqes)[i]; &(*rxq_ctrl->rxq.wqes)[i];
buf = (pool != NULL) ? (*pool)[i] : NULL; buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf != NULL) {
rte_pktmbuf_reset(buf);
rte_pktmbuf_refcnt_update(buf, 1);
} else
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) { if (buf == NULL) {
ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
ret = ENOMEM; ret = ENOMEM;
@ -725,7 +716,6 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
assert(ret == 0); assert(ret == 0);
return 0; return 0;
error: error:
assert(pool == NULL);
elts_n = i; elts_n = i;
for (i = 0; (i != elts_n); ++i) { for (i = 0; (i != elts_n); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL) if ((*rxq_ctrl->rxq.elts)[i] != NULL)
@ -1064,14 +1054,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
(void *)dev, strerror(ret)); (void *)dev, strerror(ret));
goto error; goto error;
} }
/* Reuse buffers from original queue if possible. */ ret = rxq_alloc_elts(&tmpl, desc);
if (rxq_ctrl->rxq.elts_n) {
assert(1 << rxq_ctrl->rxq.elts_n == desc);
assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
rxq_trim_elts(&rxq_ctrl->rxq);
ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
} else
ret = rxq_alloc_elts(&tmpl, desc, NULL);
if (ret) { if (ret) {
ERROR("%p: RXQ allocation failed: %s", ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(ret)); (void *)dev, strerror(ret));