net/enic: fix crash due to static max number of queues

ENIC_CQ_MAX, ENIC_WQ_MAX and the other static limits are arbitrary
values that prevent the application from using more queues when they
are available on hardware. Remove them and dynamically allocate the
vnic_cq, vnic_wq and vnic_rq arrays so they can accommodate all
available hardware queues.

As a side effect of removing ENIC_CQ_MAX, this commit fixes a segfault
that would happen when the app requests more than 16 CQs, because
enic_set_vnic_res() does not consider ENIC_CQ_MAX. For example, the
following command causes a crash.

testpmd -- --rxq=16 --txq=16

Fixes: ce93d3c36d ("net/enic: fix resource check failures when bonding devices")
Cc: stable@dpdk.org

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
Author: Hyong Youb Kim, 2018-01-22 17:05:28 -08:00 (committed by Ferruh Yigit)
parent 7d8f6c20cc
commit 6c45c33058
3 changed files with 44 additions and 44 deletions
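
Before the diff, a quick illustration (editor's sketch, not part of the patch) of the arithmetic behind the crash, using the static limits removed below: 16 Rx plus 16 Tx queues need 32 completion queues, while the cq[] array in struct enic had only ENIC_CQ_MAX = 16 slots and enic_set_vnic_res() never checked that bound, so the extra entries were written out of bounds.

#include <assert.h>
#include <stdio.h>

/* Old static limits from enic.h (removed by this patch). */
#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 16
#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))  /* = 16 */

int main(void)
{
        /* testpmd -- --rxq=16 --txq=16 */
        unsigned int nb_rxq = 16, nb_txq = 16;

        /* One CQ per WQ plus one CQ per app RQ (each app RQ maps to a
         * SOP/data RQ pair that shares a CQ): 16 + 16 = 32 needed.
         */
        unsigned int needed_cq = nb_rxq + nb_txq;

        printf("needed CQs = %u, old static limit = %u\n",
               needed_cq, ENIC_CQ_MAX);
        assert(needed_cq > ENIC_CQ_MAX);
        return 0;
}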

drivers/net/enic/enic.h

@@ -24,13 +24,6 @@
 #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
 #define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
-#define ENIC_WQ_MAX 8
-/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
- * RQs use the same CQ.
- */
-#define ENIC_RQ_MAX 16
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
 #define ENIC_MAX_MAC_ADDR 64
 #define VLAN_ETH_HLEN 18
@@ -121,17 +114,17 @@ struct enic {
         unsigned int flags;
         unsigned int priv_flags;
-        /* work queue */
-        struct vnic_wq wq[ENIC_WQ_MAX];
-        unsigned int wq_count;
+        /* work queue (len = conf_wq_count) */
+        struct vnic_wq *wq;
+        unsigned int wq_count; /* equals eth_dev nb_tx_queues */
-        /* receive queue */
-        struct vnic_rq rq[ENIC_RQ_MAX];
-        unsigned int rq_count;
+        /* receive queue (len = conf_rq_count) */
+        struct vnic_rq *rq;
+        unsigned int rq_count; /* equals eth_dev nb_rx_queues */
-        /* completion queue */
-        struct vnic_cq cq[ENIC_CQ_MAX];
-        unsigned int cq_count;
+        /* completion queue (len = conf_cq_count) */
+        struct vnic_cq *cq;
+        unsigned int cq_count; /* equals rq_count + wq_count */
         /* interrupt resource */
         struct vnic_intr intr;

drivers/net/enic/enic_ethdev.c

@@ -190,13 +190,7 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
                 return -E_RTE_SECONDARY;
         ENICPMD_FUNC_TRACE();
-        if (queue_idx >= ENIC_WQ_MAX) {
-                dev_err(enic,
-                        "Max number of TX queues exceeded. Max is %d\n",
-                        ENIC_WQ_MAX);
-                return -EINVAL;
-        }
+        RTE_ASSERT(queue_idx < enic->conf_wq_count);
         eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
         ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
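
An editor's note on why a hard -EINVAL check can become an assert here: the ethdev layer only calls tx_queue_setup with queue_idx < nb_tx_queues, and nb_tx_queues is already capped by the maximum queue count the PMD reports, so an out-of-range index would be a programming error rather than a user error. The sketch below (check_wq_idx() is a hypothetical helper, not driver code) only illustrates that RTE_ASSERT is compiled out unless RTE_ENABLE_ASSERT is defined.

#include <rte_debug.h>

/* Illustration only: RTE_ASSERT() panics on failure when RTE_ENABLE_ASSERT
 * is defined and expands to nothing otherwise, so the bound check costs
 * nothing in production builds.
 */
static inline void check_wq_idx(unsigned int queue_idx,
                                unsigned int conf_wq_count)
{
        RTE_ASSERT(queue_idx < conf_wq_count);
}
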
@@ -310,17 +304,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                 return -E_RTE_SECONDARY;
-        /* With Rx scatter support, two RQs are now used on VIC per RQ used
-         * by the application.
-         */
-        if (queue_idx * 2 >= ENIC_RQ_MAX) {
-                dev_err(enic,
-                        "Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
-                        ENIC_RQ_MAX);
-                return -EINVAL;
-        }
+        RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
         eth_dev->data->rx_queues[queue_idx] =
                 (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
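
The Rx-side assert relies on the 2:1 mapping between application Rx queues and VIC RQs that the removed "queue_idx * 2" check also encoded. A hedged sketch of that mapping follows; sop_rq_idx() and data_rq_idx() are illustrative stand-ins for the real helpers in enic.h such as enic_rte_rq_idx_to_sop_idx(), not copies of them.

/* Each application Rx queue is backed by two VIC RQs: a start-of-packet
 * (SOP) RQ and a data RQ used for Rx scatter; the pair shares one CQ.
 * Even/odd layout of the rq[] array is an assumption for illustration.
 */
static inline unsigned int sop_rq_idx(unsigned int app_rq_idx)
{
        return app_rq_idx * 2;          /* even slots: SOP RQs */
}

static inline unsigned int data_rq_idx(unsigned int app_rq_idx)
{
        return app_rq_idx * 2 + 1;      /* odd slots: data RQs */
}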

drivers/net/enic/enic_main.c

@@ -1042,6 +1042,9 @@ static void enic_dev_deinit(struct enic *enic)
         vnic_dev_notify_unset(enic->vdev);
         rte_free(eth_dev->data->mac_addrs);
+        rte_free(enic->cq);
+        rte_free(enic->rq);
+        rte_free(enic->wq);
 }
@@ -1049,27 +1052,28 @@ int enic_set_vnic_res(struct enic *enic)
 {
         struct rte_eth_dev *eth_dev = enic->rte_dev;
         int rc = 0;
+        unsigned int required_rq, required_wq, required_cq;
-        /* With Rx scatter support, two RQs are now used per RQ used by
-         * the application.
-         */
-        if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+        /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+        required_rq = eth_dev->data->nb_rx_queues * 2;
+        required_wq = eth_dev->data->nb_tx_queues;
+        required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+        if (enic->conf_rq_count < required_rq) {
                 dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
                         eth_dev->data->nb_rx_queues,
-                        eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+                        required_rq, enic->conf_rq_count);
                 rc = -EINVAL;
         }
-        if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+        if (enic->conf_wq_count < required_wq) {
                 dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
                         eth_dev->data->nb_tx_queues, enic->conf_wq_count);
                 rc = -EINVAL;
         }
-        if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
-                                   eth_dev->data->nb_tx_queues)) {
+        if (enic->conf_cq_count < required_cq) {
                 dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
-                        (eth_dev->data->nb_rx_queues +
-                         eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+                        required_cq, enic->conf_cq_count);
                 rc = -EINVAL;
         }
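
As a worked example (illustration only, not driver code), the demand the new checks compute for the command from the commit message; a vNIC configured with fewer resources now fails enic_set_vnic_res() with -EINVAL instead of overrunning fixed-size arrays.

#include <stdio.h>

int main(void)
{
        /* testpmd -- --rxq=16 --txq=16 */
        unsigned int nb_rx_queues = 16, nb_tx_queues = 16;

        /* Two VIC RQs (SOP + data) per application Rx queue. */
        unsigned int required_rq = nb_rx_queues * 2;               /* 32 */
        unsigned int required_wq = nb_tx_queues;                   /* 16 */
        unsigned int required_cq = nb_rx_queues + nb_tx_queues;    /* 32 */

        printf("required rq=%u wq=%u cq=%u\n",
               required_rq, required_wq, required_cq);
        return 0;
}
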
@@ -1275,6 +1279,25 @@ static int enic_dev_init(struct enic *enic)
                 dev_err(enic, "See the ENIC PMD guide for more information.\n");
                 return -EINVAL;
         }
+        /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
+        enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+                               enic->conf_cq_count, 8);
+        enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+                               enic->conf_rq_count, 8);
+        enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+                               enic->conf_wq_count, 8);
+        if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+                dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+                return -1;
+        }
+        if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+                dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+                return -1;
+        }
+        if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+                dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+                return -1;
+        }
         /* Get the supported filters */
         enic_fdir_info(enic);
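
One property the new allocations and the matching rte_free() calls in enic_dev_deinit() lean on: rte_zmalloc() returns NULL for a zero size (as the comment in the hunk notes) and rte_free(NULL) is a no-op, so a zero queue count needs no special casing on either path. A minimal sketch, with a placeholder struct standing in for the driver's vnic_cq:

#include <rte_malloc.h>

/* Placeholder so the sketch compiles standalone; the real struct vnic_cq
 * comes from the driver's vNIC headers.
 */
struct vnic_cq_stub { int placeholder; };

void cq_array_alloc_free(unsigned int conf_cq_count)
{
        struct vnic_cq_stub *cq;

        /* conf_cq_count == 0 -> size 0 -> rte_zmalloc() returns NULL */
        cq = rte_zmalloc("enic_vnic_cq", sizeof(*cq) * conf_cq_count, 8);

        /* rte_free(NULL) does nothing, so teardown needs no extra check */
        rte_free(cq);
}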