net/bnxt: fix ring and context memory allocation

Use requested socket ID when allocating memory for transmit rings,
receive rings, and completion queues. Use device NUMA ID when
allocating context memory, notification queue rings, async
completion queue rings, and VNIC attributes.

Fixes: 6eb3cc2294fd ("net/bnxt: add initial Tx code")
Fixes: 9738793f28ec ("net/bnxt: add VNIC functions and structs")
Fixes: f8168ca0e690 ("net/bnxt: support thor controller")
Fixes: bd0a14c99f65 ("net/bnxt: use dedicated CPR for async events")
Fixes: 683e5cf79249 ("net/bnxt: use common NQ ring")
Cc: stable@dpdk.org

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
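
In short, the patch draws one distinction: memory tied to a queue follows the socket the
application asked for, while memory owned by the device follows the adapter's NUMA node.
A minimal sketch of the two policies (illustrative only; the helper names and bare
size/socket_id parameters are stand-ins, not driver code):

#include <rte_ethdev.h>
#include <rte_malloc.h>

/* Per-queue memory (Tx/Rx rings, completion queues): honor the socket
 * the application passed to its queue_setup call. */
static void *
alloc_queue_mem(size_t size, unsigned int socket_id)
{
	return rte_zmalloc_socket("bnxt_queue_mem", size,
				  RTE_CACHE_LINE_SIZE, socket_id);
}

/* Device-owned memory (context memory, NQ/async completion rings, VNIC
 * attributes): place it on the NUMA node the adapter is attached to. */
static void *
alloc_device_mem(struct rte_eth_dev *eth_dev, size_t size)
{
	return rte_zmalloc_socket("bnxt_dev_mem", size,
				  RTE_CACHE_LINE_SIZE,
				  eth_dev->device->numa_node);
}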

drivers/net/bnxt/bnxt_ethdev.c

@@ -580,13 +580,14 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
 	return rc;
 }
 
-static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
+static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
 				  struct bnxt_ctx_mem_buf_info *ctx)
 {
 	if (!ctx)
 		return -EINVAL;
 
-	ctx->va = rte_zmalloc(type, size, 0);
+	ctx->va = rte_zmalloc_socket(type, size, 0,
+				     bp->eth_dev->device->numa_node);
 	if (ctx->va == NULL)
 		return -ENOMEM;
 	rte_mem_lock_page(ctx->va);
@@ -610,7 +611,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 4,
 				    &bp->flow_stat->rx_fc_in_tbl);
 	if (rc)
@@ -619,7 +620,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 16,
 				    &bp->flow_stat->rx_fc_out_tbl);
 	if (rc)
@@ -628,7 +629,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 4,
 				    &bp->flow_stat->tx_fc_in_tbl);
 	if (rc)
@@ -637,7 +638,7 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
-	rc = bnxt_alloc_ctx_mem_buf(type,
+	rc = bnxt_alloc_ctx_mem_buf(bp, type,
 				    max_fc * 16,
 				    &bp->flow_stat->tx_fc_out_tbl);
 	if (rc)
@@ -4667,7 +4668,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 	if (!mz) {
 		mz = rte_memzone_reserve_aligned(mz_name,
 				rmem->nr_pages * 8,
-				SOCKET_ID_ANY,
+				bp->eth_dev->device->numa_node,
 				RTE_MEMZONE_2MB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG,
@@ -4690,7 +4691,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 	if (!mz) {
 		mz = rte_memzone_reserve_aligned(mz_name,
 				mem_size,
-				SOCKET_ID_ANY,
+				bp->eth_dev->device->numa_node,
 				RTE_MEMZONE_1GB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG,
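
A side note on the context-memory hunks above (an observation, not part of the patch):
struct rte_device's numa_node is -1 when the bus cannot determine the node, and -1 is
also DPDK's SOCKET_ID_ANY, so on hosts without that information these call sites fall
back to the old any-socket behavior. The reservation pattern, sketched with a
placeholder zone name and size:

#include <rte_memzone.h>
#include <rte_memory.h>

/* Sketch of the call above; "bnxt_ctx_pg_tbl" and mem_size stand in for
 * the driver's generated zone name and computed size. The 2M alignment
 * argument is an assumption; the hunk above truncates before it. */
const struct rte_memzone *mz =
	rte_memzone_reserve_aligned("bnxt_ctx_pg_tbl", mem_size,
				    bp->eth_dev->device->numa_node,
				    RTE_MEMZONE_2MB |
				    RTE_MEMZONE_SIZE_HINT_ONLY |
				    RTE_MEMZONE_IOVA_CONTIG,
				    RTE_PGSIZE_2M);
if (mz == NULL)
	return -ENOMEM;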

drivers/net/bnxt/bnxt_ring.c

@@ -94,7 +94,7 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
  * tx bd ring - Only non-zero length if tx_ring_info is not NULL
  * rx bd ring - Only non-zero length if rx_ring_info is not NULL
  */
-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 		     struct bnxt_tx_queue *txq,
 		     struct bnxt_rx_queue *rxq,
 		     struct bnxt_cp_ring_info *cp_ring_info,
@@ -203,7 +203,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 	mz = rte_memzone_lookup(mz_name);
 	if (!mz) {
 		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
-				SOCKET_ID_ANY,
+				socket_id,
 				RTE_MEMZONE_2MB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG,
@@ -435,24 +435,23 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
 	struct bnxt_cp_ring_info *nqr;
 	struct bnxt_ring *ring;
 	int ring_index = BNXT_NUM_ASYNC_CPR(bp);
-	unsigned int socket_id;
 	uint8_t ring_type;
 	int rc = 0;
 
 	if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
 		return 0;
 
-	socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
-
 	nqr = rte_zmalloc_socket("nqr",
 				 sizeof(struct bnxt_cp_ring_info),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+				 RTE_CACHE_LINE_SIZE,
+				 bp->eth_dev->device->numa_node);
 	if (nqr == NULL)
 		return -ENOMEM;
 
 	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
 				  sizeof(struct bnxt_ring),
-				  RTE_CACHE_LINE_SIZE, socket_id);
+				  RTE_CACHE_LINE_SIZE,
+				  bp->eth_dev->device->numa_node);
 	if (ring == NULL) {
 		rte_free(nqr);
 		return -ENOMEM;
@@ -467,7 +466,8 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
 	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	nqr->cp_ring_struct = ring;
-	rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
+	rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
+			      NULL, nqr, NULL, "l2_nqr");
 	if (rc) {
 		rte_free(ring);
 		rte_free(nqr);
@@ -805,22 +805,21 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
 {
 	struct bnxt_cp_ring_info *cpr = NULL;
 	struct bnxt_ring *ring = NULL;
-	unsigned int socket_id;
 
 	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
 		return 0;
 
-	socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
-
 	cpr = rte_zmalloc_socket("cpr",
 				 sizeof(struct bnxt_cp_ring_info),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+				 RTE_CACHE_LINE_SIZE,
+				 bp->eth_dev->device->numa_node);
 	if (cpr == NULL)
 		return -ENOMEM;
 
 	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
 				  sizeof(struct bnxt_ring),
-				  RTE_CACHE_LINE_SIZE, socket_id);
+				  RTE_CACHE_LINE_SIZE,
+				  bp->eth_dev->device->numa_node);
 	if (ring == NULL) {
 		rte_free(cpr);
 		return -ENOMEM;
@@ -836,7 +835,6 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
 	bp->async_cp_ring = cpr;
 	cpr->cp_ring_struct = ring;
 
-	return bnxt_alloc_rings(bp, 0, NULL, NULL,
-				bp->async_cp_ring, NULL,
-				"def_cp");
+	return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
+				NULL, bp->async_cp_ring, NULL, "def_cp");
 }
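
The bnxt_alloc_rings() callers updated in this patch fall into two patterns: queue rings
(rxq/txq hunks below) take the socket requested at queue setup, while the NQ and async
completion rings take the device's node. Side by side, as they appear in the diff:

/* Queue rings: socket_id comes from the application's queue_setup call. */
rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
		      NULL, "rxr");

/* Device-level rings (NQ, async CPR): use the adapter's NUMA node. */
rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
		      NULL, nqr, NULL, "l2_nqr");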

drivers/net/bnxt/bnxt_ring.h

@@ -67,7 +67,7 @@ struct bnxt_rx_ring_info;
 struct bnxt_cp_ring_info;
 void bnxt_free_ring(struct bnxt_ring *ring);
 int bnxt_alloc_ring_grps(struct bnxt *bp);
-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 		     struct bnxt_tx_queue *txq,
 		     struct bnxt_rx_queue *rxq,
 		     struct bnxt_cp_ring_info *cp_ring_info,

drivers/net/bnxt/bnxt_rxq.c

@@ -345,8 +345,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	eth_dev->data->rx_queues[queue_idx] = rxq;
 
 	/* Allocate RX ring hardware descriptors */
-	rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
-			      "rxr");
+	rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
+			      NULL, "rxr");
 	if (rc) {
 		PMD_DRV_LOG(ERR,
 			    "ring_dma_zone_reserve for rx_ring failed!\n");

drivers/net/bnxt/bnxt_txq.c

@@ -149,8 +149,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	txq->port_id = eth_dev->data->port_id;
 
 	/* Allocate TX ring hardware descriptors */
-	if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
-			     "txr")) {
+	if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
+			     NULL, "txr")) {
 		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
 		rc = -ENOMEM;
 		goto err;
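
For reference, the socket_id forwarded in the rxq/txq hunks originates in the
application's queue-setup calls. A typical (hypothetical) setup that keeps queue memory
local to the port; port_id, nb_rxd, nb_txd, and mb_pool are application-side assumptions:

#include <rte_ethdev.h>

/* Ask ethdev which NUMA node the port sits on (-1 if unknown). */
int socket = rte_eth_dev_socket_id(port_id);

/* NULL conf pointers select the driver defaults. */
ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket, NULL, mb_pool);
if (ret == 0)
	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket, NULL);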

drivers/net/bnxt/bnxt_vnic.c

@@ -145,7 +145,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 	mz = rte_memzone_lookup(mz_name);
 	if (!mz) {
 		mz = rte_memzone_reserve(mz_name,
-				entry_length * max_vnics, SOCKET_ID_ANY,
+				entry_length * max_vnics,
+				bp->eth_dev->device->numa_node,
 				RTE_MEMZONE_2MB |
 				RTE_MEMZONE_SIZE_HINT_ONLY |
 				RTE_MEMZONE_IOVA_CONTIG);