net/bnxt: fix extended port counter statistics

1. Refactored the stats allocation code into a new routine.
2. The check for extended statistics support depends on "hwrm_spec_code",
   which is set in bnxt_hwrm_ver_get, called later during init. Hence we
   were never querying extended port stats, because the flags field had
   not yet been updated. Fixed this by moving the stats allocation after
   the call to bnxt_hwrm_ver_get.
3. We were incorrectly passing the host address used for regular port
   statistics to the PORT_QSTATS_EXT command. Fixed this by passing the
   correct extended stats address instead.

Fixes: f55e12f334 ("net/bnxt: support extended port counters")
Cc: stable@dpdk.org

Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Commit 96b0931d51 (parent 55e51c9624).
Authored by Kalesh AP on 2019-07-17 16:11:25 +05:30; committed by Ferruh Yigit.
3 changed files with 125 additions and 109 deletions.

View File

@ -350,6 +350,7 @@ struct bnxt {
#define BNXT_FLAG_TRUSTED_VF_EN (1 << 11)
#define BNXT_FLAG_DFLT_VNIC_SET (1 << 12)
#define BNXT_FLAG_THOR_CHIP (1 << 13)
#define BNXT_FLAG_EXT_STATS_SUPPORTED (1 << 29)
#define BNXT_FLAG_NEW_RM (1 << 30)
#define BNXT_FLAG_INIT_DONE (1U << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))

View File

@ -3639,6 +3639,120 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
return 0;
}
/*
 * Allocate the DMA-able host memory the firmware fills with port
 * statistics (HWRM PORT_QSTATS and, when supported, PORT_QSTATS_EXT).
 *
 * Two memzones are reserved (or re-used via lookup, so a secondary
 * attach / restart finds the same zones): one sized for
 * rx_port_stats + rx_port_stats_ext, one for tx_port_stats +
 * tx_port_stats_ext. The extended stats live directly after the basic
 * stats in the same zone; the *_ext pointers/maps are just offsets
 * into it. Must be called after bnxt_hwrm_ver_get() so that
 * bp->hwrm_spec_code and BNXT_FLAG_EXT_STATS_SUPPORTED are valid.
 *
 * Returns 0 on success (including the no-op NS2 case), -ENOMEM on
 * memzone reservation or IOVA mapping failure.
 */
static int bnxt_alloc_stats_mem(struct bnxt *bp)
{
	struct rte_pci_device *pci_dev = bp->pdev;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	uint32_t total_alloc_len;
	rte_iova_t mz_phys_addr;

	/* NS2 devices do not support port statistics; nothing to do. */
	if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
		return 0;

	/* Per-device RX stats zone name keyed on the PCI address. */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
		 pci_dev->addr.bus, pci_dev->addr.devid,
		 pci_dev->addr.function, "rx_port_stats");
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	/* Re-use an existing zone (e.g. after restart) before reserving. */
	mz = rte_memzone_lookup(mz_name);
	/*
	 * Basic + extended RX stats plus 512 bytes of slack, rounded up to
	 * a cache line; the firmware DMAs into this buffer.
	 */
	total_alloc_len =
		RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
				       sizeof(struct rx_port_stats_ext) + 512);
	if (!mz) {
		mz = rte_memzone_reserve(mz_name, total_alloc_len,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_IOVA_CONTIG);
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;
	/*
	 * If the reported IOVA equals the virtual address, the zone was
	 * likely not IOVA-mapped (e.g. --no-huge); fall back to an explicit
	 * virt-to-IOVA translation so the firmware gets a DMA-able address.
	 */
	if ((unsigned long)mz->addr == mz_phys_addr) {
		PMD_DRV_LOG(WARNING,
			    "Memzone physical address same as virtual.\n");
		PMD_DRV_LOG(WARNING,
			    "Using rte_mem_virt2iova()\n");
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == 0) {
			PMD_DRV_LOG(ERR,
				    "Can't map address to physical memory\n");
			return -ENOMEM;
		}
	}
	bp->rx_mem_zone = (const void *)mz;
	bp->hw_rx_port_stats = mz->addr;
	bp->hw_rx_port_stats_map = mz_phys_addr;

	/* Same scheme for the TX stats zone. */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
		 pci_dev->addr.bus, pci_dev->addr.devid,
		 pci_dev->addr.function, "tx_port_stats");
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	total_alloc_len =
		RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
				       sizeof(struct tx_port_stats_ext) + 512);
	if (!mz) {
		mz = rte_memzone_reserve(mz_name,
					 total_alloc_len,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_IOVA_CONTIG);
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;
	if ((unsigned long)mz->addr == mz_phys_addr) {
		PMD_DRV_LOG(WARNING,
			    "Memzone physical address same as virtual\n");
		PMD_DRV_LOG(WARNING,
			    "Using rte_mem_virt2iova()\n");
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == 0) {
			PMD_DRV_LOG(ERR,
				    "Can't map address to physical memory\n");
			return -ENOMEM;
		}
	}
	bp->tx_mem_zone = (const void *)mz;
	bp->hw_tx_port_stats = mz->addr;
	bp->hw_tx_port_stats_map = mz_phys_addr;
	bp->flags |= BNXT_FLAG_PORT_STATS;

	/* Display extended statistics only if the FW supports it */
	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
	    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
	    !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
		return 0;

	/* Extended RX stats sit right after the basic RX stats block. */
	bp->hw_rx_port_stats_ext = (void *)
		((uint8_t *)bp->hw_rx_port_stats +
		 sizeof(struct rx_port_stats));
	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
		sizeof(struct rx_port_stats);
	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;

	/*
	 * NOTE(review): EXT_STATS_SUPPORTED is guaranteed set at this
	 * point (we returned above otherwise), so this condition is
	 * always true; the spec-code term appears redundant — confirm
	 * against the intended FW-version gating.
	 */
	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
		/* Extended TX stats sit right after the basic TX stats. */
		bp->hw_tx_port_stats_ext = (void *)
			((uint8_t *)bp->hw_tx_port_stats +
			 sizeof(struct tx_port_stats));
		bp->hw_tx_port_stats_ext_map =
			bp->hw_tx_port_stats_map +
			sizeof(struct tx_port_stats);
		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
	}

	return 0;
}
#define ALLOW_FUNC(x) \
{ \
uint32_t arg = (x); \
@ -3649,11 +3763,7 @@ static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz = NULL;
static int version_printed;
uint32_t total_alloc_len;
rte_iova_t mz_phys_addr;
struct bnxt *bp;
uint16_t mtu;
int rc;
@ -3692,109 +3802,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
pci_dev->addr.bus, pci_dev->addr.devid,
pci_dev->addr.function, "rx_port_stats");
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
sizeof(struct rx_port_stats) +
sizeof(struct rx_port_stats_ext) +
512);
if (!mz) {
mz = rte_memzone_reserve(mz_name, total_alloc_len,
SOCKET_ID_ANY,
RTE_MEMZONE_2MB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL)
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
PMD_DRV_LOG(INFO,
"Memzone physical address same as virtual using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
}
bp->rx_mem_zone = (const void *)mz;
bp->hw_rx_port_stats = mz->addr;
bp->hw_rx_port_stats_map = mz_phys_addr;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
pci_dev->addr.bus, pci_dev->addr.devid,
pci_dev->addr.function, "tx_port_stats");
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
sizeof(struct tx_port_stats) +
sizeof(struct tx_port_stats_ext) +
512);
if (!mz) {
mz = rte_memzone_reserve(mz_name,
total_alloc_len,
SOCKET_ID_ANY,
RTE_MEMZONE_2MB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL)
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
}
bp->tx_mem_zone = (const void *)mz;
bp->hw_tx_port_stats = mz->addr;
bp->hw_tx_port_stats_map = mz_phys_addr;
bp->flags |= BNXT_FLAG_PORT_STATS;
/* Display extended statistics if FW supports it */
if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0)
goto skip_ext_stats;
bp->hw_rx_port_stats_ext = (void *)
((uint8_t *)bp->hw_rx_port_stats +
sizeof(struct rx_port_stats));
bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
sizeof(struct rx_port_stats);
bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) {
bp->hw_tx_port_stats_ext = (void *)
((uint8_t *)bp->hw_tx_port_stats +
sizeof(struct tx_port_stats));
bp->hw_tx_port_stats_ext_map =
bp->hw_tx_port_stats_map +
sizeof(struct tx_port_stats);
bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
}
}
skip_ext_stats:
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
PMD_DRV_LOG(ERR,
@ -3823,6 +3830,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
rc = bnxt_alloc_stats_mem(bp);
if (rc)
goto error_free;
if (bp->max_tx_rings == 0) {
PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;

View File

@ -611,6 +611,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
}
}
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
HWRM_UNLOCK();
return rc;
@ -4495,13 +4498,13 @@ int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
req.port_id = rte_cpu_to_le_16(pf->port_id);
if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
req.tx_stat_host_addr =
rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
req.tx_stat_size =
rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
}
if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
req.rx_stat_host_addr =
rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
req.rx_stat_size =
rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
}