numam-dpdk/drivers/net/bnxt/bnxt_vnic.c
Somnath Kotur b7e5f647e2 net/bnxt: fix freeing all VNICs during port stop
Now that vnics are created only as part of the flow creation commands
and not during init, we cannot rely on iterating only through
'nr_vnics'. We need to sweep all the vnics by using 'max_vnics',
otherwise a vnic with a stale 'rx_queue_cnt' might be left lingering
after a port stop/start operation, which could lead to a segfault
(see the sketch below).
This change is required because of the recent fix in commit
"net/bnxt: fix flow creation with non-consecutive group ids".

Fixes: fcdd7210aa1f ("net/bnxt: fix flow creation with non-consecutive group ids")

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
2019-11-26 18:05:15 +01:00
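
The change the message describes lands in bnxt_free_all_vnics() further down
in this file. Below is a minimal annotated sketch of that function, assuming
the pre-fix loop bound was bp->nr_vnics as the commit message states:

	void bnxt_free_all_vnics(struct bnxt *bp)
	{
		struct bnxt_vnic_info *vnic;
		unsigned int i;

		/* Before the fix (per the message above) the sweep stopped at
		 * bp->nr_vnics, i.e. only the VNICs counted as in use:
		 *
		 *	for (i = 0; i < bp->nr_vnics; i++)
		 *
		 * After the fix every slot up to bp->max_vnics is returned to
		 * the free list and its queue count is cleared, so no stale
		 * state survives a port stop/start.
		 */
		for (i = 0; i < bp->max_vnics; i++) {
			vnic = &bp->vnic_info[i];
			STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
			vnic->rx_queue_cnt = 0;
		}
	}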

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_memzone.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * VNIC Functions
 */
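
/* Fill 'dest_ptr' with 'len' pseudo-random bytes, drawing 64 bits at a
 * time from rte_rand(). Used below to seed the per-VNIC RSS hash key.
 */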
void prandom_bytes(void *dest_ptr, size_t len)
{
	char *dest = (char *)dest_ptr;
	uint64_t rb;

	while (len) {
		rb = rte_rand();
		if (len >= 8) {
			memcpy(dest, &rb, 8);
			len -= 8;
			dest += 8;
		} else {
			memcpy(dest, &rb, len);
			dest += len;
			len = 0;
		}
	}
}
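
/* Reset every VNIC slot to its "no firmware resource" state and rebuild
 * the free list from scratch.
 */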
static void bnxt_init_vnics(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	uint16_t max_vnics;
	int i;

	max_vnics = bp->max_vnics;
	STAILQ_INIT(&bp->free_vnic_list);
	for (i = 0; i < max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
		vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
		vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
		vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
		vnic->hash_mode =
			HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
		vnic->rx_queue_cnt = 0;

		STAILQ_INIT(&vnic->filter);
		STAILQ_INIT(&vnic->flow_list);
		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
	}
}

struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;

	/* Find the 1st unused vnic from the free_vnic_list pool*/
	vnic = STAILQ_FIRST(&bp->free_vnic_list);
	if (!vnic) {
		PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
	return vnic;
}
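
/* Sweep all 'max_vnics' entries (not just the ones currently in use)
 * back onto the free list and clear their queue counts, so that no
 * stale state survives a port stop/start cycle.
 */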
void bnxt_free_all_vnics(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
		vnic->rx_queue_cnt = 0;
	}
}

void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->rss_table) {
			/* 'Unreserve' the rss_table */
			/* N/A */
			vnic->rss_table = NULL;
		}
		if (vnic->rss_hash_key) {
			/* 'Unreserve' the rss_hash_key */
			/* N/A */
			vnic->rss_hash_key = NULL;
		}
	}
}
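
/* Carve a single memzone into one slice per VNIC. Each slice holds the
 * RSS indirection table, the RSS hash key and the multicast address
 * list, and the matching DMA addresses are recorded on the VNIC.
 */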
int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct rte_pci_device *pdev = bp->pdev;
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t entry_length;
	size_t rss_table_size;
	uint16_t max_vnics;
	int i;
	rte_iova_t mz_phys_addr;

	entry_length = HW_HASH_KEY_SIZE +
		       BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN;

	if (BNXT_CHIP_THOR(bp))
		rss_table_size = BNXT_RSS_TBL_SIZE_THOR *
				 2 * sizeof(*vnic->rss_table);
	else
		rss_table_size = HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);

	entry_length = RTE_CACHE_LINE_ROUNDUP(entry_length + rss_table_size);

	max_vnics = bp->max_vnics;
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;

	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve(mz_name,
					 entry_length * max_vnics,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_IOVA_CONTIG);
		if (!mz)
			return -ENOMEM;
	}
	mz_phys_addr = mz->iova;
	if ((unsigned long)mz->addr == mz_phys_addr) {
		PMD_DRV_LOG(DEBUG,
			    "Memzone physical address same as virtual.\n");
		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == RTE_BAD_IOVA) {
			PMD_DRV_LOG(ERR,
				    "unable to map to physical memory\n");
			return -ENOMEM;
		}
	}

	for (i = 0; i < max_vnics; i++) {
		vnic = &bp->vnic_info[i];

		/* Allocate rss table and hash key */
		vnic->rss_table =
			(void *)((char *)mz->addr + (entry_length * i));
		memset(vnic->rss_table, -1, entry_length);

		vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i);
		vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
					      rss_table_size);
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr +
					      rss_table_size;

		vnic->mc_list = (void *)((char *)vnic->rss_hash_key +
					 HW_HASH_KEY_SIZE);
		vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr +
					 HW_HASH_KEY_SIZE;
		prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
	}

	return 0;
}

void bnxt_free_vnic_mem(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	uint16_t max_vnics, i;

	if (bp->vnic_info == NULL)
		return;

	max_vnics = bp->max_vnics;
	for (i = 0; i < max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
			PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
			/* TODO Call HWRM to free VNIC */
		}
	}
	rte_free(bp->vnic_info);
	bp->vnic_info = NULL;
}

int bnxt_alloc_vnic_mem(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic_mem;
	uint16_t max_vnics;

	max_vnics = bp->max_vnics;
	/* Allocate memory for VNIC pool and filter pool */
	vnic_mem = rte_zmalloc("bnxt_vnic_info",
			       max_vnics * sizeof(struct bnxt_vnic_info), 0);
	if (vnic_mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
			    max_vnics);
		return -ENOMEM;
	}
	bp->vnic_info = vnic_mem;
	bnxt_init_vnics(bp);
	return 0;
}
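
/* Allocate the per-VNIC array of firmware ring group ids, one slot per
 * ring group, initialised to all-ones (i.e. not yet assigned).
 */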
int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;

	vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
	if (!vnic->fw_grp_ids) {
		PMD_DRV_LOG(ERR,
			    "Failed to alloc %d bytes for group ids\n",
			    size);
		return -ENOMEM;
	}
	memset(vnic->fw_grp_ids, -1, size);

	return 0;
}
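
/* Translate DPDK ETH_RSS_* hash flags into the equivalent HWRM VNIC RSS
 * hash type bits.
 */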
uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
{
	uint16_t hwrm_type = 0;

	if (rte_type & ETH_RSS_IPV4)
		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rte_type & ETH_RSS_IPV6)
		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	return hwrm_type;
}