net/bnxt: handle multi queue mode properly
We are currently not handling multi queue RX/RSS modes correctly.
If RSS is not requested, create one VNIC per RXQ.
Fixes: 6133f20797 ("net/bnxt: add Rx queue create/destroy")
Cc: stable@dpdk.org
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
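
At its core, the reworked bnxt_mq_rx_configure() defaults the pool count to the number of configured Rx rings (one VNIC per RXQ when RSS is not requested), clamps it against the device resource limits, and splits the Rx queues evenly across the pools. The standalone sketch below illustrates only that arithmetic; RTE_MIN and ETH_64_POOLS are re-defined locally so it builds outside DPDK, and the resource caps are made-up placeholder values, not the driver's real bp fields.

    /*
     * Minimal sketch of the pool/queue split described above:
     * clamp the pool count against the resource limits, then divide
     * the Rx rings evenly across the pools (one VNIC per pool).
     */
    #include <stdio.h>

    #define RTE_MIN(a, b) ((a) < (b) ? (a) : (b))  /* stand-in for rte_common.h */
    #define ETH_64_POOLS  64

    int main(void)
    {
            /* placeholder caps, not real bp->max_* values */
            unsigned int max_vnics = 8, max_l2_ctx = 16, max_rsscos_ctx = 8;
            unsigned int rx_cp_nr_rings = 16;        /* configured Rx rings */
            unsigned int pools = rx_cp_nr_rings;     /* default: one VNIC per RXQ */
            unsigned int max_pools, nb_q_per_grp;

            /* clamp against the hardware limits */
            max_pools = RTE_MIN(max_vnics,
                                RTE_MIN(max_l2_ctx,
                                RTE_MIN(max_rsscos_ctx, ETH_64_POOLS)));
            if (pools > max_pools)
                    pools = max_pools;

            /* each pool (VNIC) gets an equal share of the Rx queues */
            nb_q_per_grp = rx_cp_nr_rings / pools;

            printf("pools=%u queues-per-pool=%u\n", pools, nb_q_per_grp);
            return 0;
    }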
@@ -360,6 +360,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
@@ -370,8 +371,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	dev_info->max_rx_queues = bp->max_rx_rings;
	dev_info->max_tx_queues = bp->max_tx_rings;
	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
					      RTE_MIN(bp->max_rsscos_ctx,
						      bp->max_stat_ctx)));
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bp->max_rsscos_ctx;
	max_vnics = bp->max_vnics;
@@ -827,7 +832,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			return -EINVAL;
			RTE_LOG(ERR, PMD, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
@@ -60,11 +60,13 @@ void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	unsigned int i, j, nb_q_per_grp, ring_idx;
	int start_grp_id, end_grp_id, rc = 0;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
	struct bnxt_rx_queue *rxq;
	bool rss_dflt_cr = false;

	bp->nr_vnics = 0;
@@ -98,45 +100,51 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
	}

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
		enum rte_eth_nb_pools pools;
		const struct rte_eth_vmdq_rx_conf *conf =
			&dev_conf->rx_adv_conf.vmdq_rx_conf;

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
			{
			const struct rte_eth_vmdq_rx_conf *conf =
				&dev_conf->rx_adv_conf.vmdq_rx_conf;

			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = 1;
			break;
			}
		default:
			RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
				dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
		/* For each pool, allocate MACVLAN CFA rule & VNIC */
		if (!pools) {
			pools = RTE_MIN(bp->max_vnics,
				RTE_MIN(bp->max_l2_ctx,
				RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
			RTE_LOG(ERR, PMD,
				"VMDq pool not set, defaulted to %d\n", pools);
		}
	/*
	 * If MQ RX w/o RSS no need for per VNIC filter.
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) ||
	    (bp->rx_cp_nr_rings &&
	     !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS)))
		rss_dflt_cr = true;

	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;

	ring_idx = 0;
	for (i = 0; i < pools; i++) {
		vnic = bnxt_alloc_vnic(bp);
		if (!vnic) {
			RTE_LOG(ERR, PMD,
				"VNIC alloc failed\n");
			RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
@@ -144,7 +152,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
		STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
		for (j = 0, ring_idx = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
		}
@@ -154,10 +162,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (rss_dflt_cr && i) {
			vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			RTE_LOG(ERR, PMD,
				"L2 filter alloc failed\n");
			RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
@@ -167,47 +178,45 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}
	goto out;
	}

	/* Non-VMDq mode - RSS, DCB, RSS+DCB */
	/* Init default VNIC for RSS or DCB only */
	vnic = bnxt_alloc_vnic(bp);
	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
		rc = -ENOMEM;
		goto err_out;
	}
	vnic->flags |= BNXT_VNIC_INFO_BCAST;
	/* Partition the rx queues for the single pool */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->eth_dev->data->rx_queues[i];
		rxq->vnic = vnic;
	}
	STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
	bp->nr_vnics++;

	vnic->func_default = true;
	vnic->ff_pool_idx = 0;
	vnic->start_grp_id = 0;
	vnic->end_grp_id = bp->rx_cp_nr_rings;
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		rc = -ENOMEM;
		goto err_out;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		vnic->hash_type =
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;

out:
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
		uint16_t hash_type = 0;

		if (rss->rss_hf & ETH_RSS_IPV4)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		if (rss->rss_hf & ETH_RSS_IPV6)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

		for (i = 0; i < bp->nr_vnics; i++) {
			STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
				vnic->hash_type |= hash_type;

				/*
				 * Use the supplied key if the key length is
				 * acceptable and the rss_key is not NULL
				 */
				if (rss->rss_key &&
				    rss->rss_key_len <= HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->rss_key, rss->rss_key_len);
			}
		}
	}

	return rc;

err_out: