net/bnxt: allocate Rx/Tx and completion rings

Perform allocation and freeing of ring and information structures
for the TX, RX, and completion rings. The previous patches had
so far provided top level stubs and generic ring support, while this
patch does the real allocation and freeing of the memory specific to
each different type of generic ring.

For example bnxt_init_tx_ring_struct() or bnxt_init_rx_ring_struct() is
now allocating memory based on the socket_id being provided.

bnxt_tx_queue_setup_op() or bnxt_rx_queue_setup_op() have gone through
some reformatting to perform a graceful cleanup in case memory
allocation fails.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Stephen Hurd <stephen.hurd@broadcom.com>
Reviewed-by: David Christensen <david.christensen@broadcom.com>
This commit is contained in:
Ajit Khaparde 2016-06-15 14:23:15 -07:00 committed by Bruce Richardson
parent 2eb53b134a
commit 2bb1d5db43
10 changed files with 127 additions and 41 deletions

View File

@ -31,6 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_malloc.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
@ -121,21 +123,37 @@ reject:
void bnxt_free_def_cp_ring(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
struct bnxt_ring *ring = cpr->cp_ring_struct;
bnxt_free_ring(ring);
bnxt_free_ring(cpr->cp_ring_struct);
rte_free(cpr->cp_ring_struct);
rte_free(cpr);
}
/* For the default completion ring only */
void bnxt_init_def_ring_struct(struct bnxt *bp)
int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
struct bnxt_ring *ring = cpr->cp_ring_struct;
struct bnxt_cp_ring_info *cpr;
struct bnxt_ring *ring;
cpr = rte_zmalloc_socket("cpr",
sizeof(struct bnxt_cp_ring_info),
RTE_CACHE_LINE_SIZE, socket_id);
if (cpr == NULL)
return -ENOMEM;
bp->def_cp_ring = cpr;
ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
sizeof(struct bnxt_ring),
RTE_CACHE_LINE_SIZE, socket_id);
if (ring == NULL)
return -ENOMEM;
cpr->cp_ring_struct = ring;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
ring->ring_mask = ring->ring_size - 1;
ring->vmem_size = 0;
ring->vmem = NULL;
return 0;
}

View File

@ -79,7 +79,7 @@ struct bnxt_cp_ring_info {
struct bnxt;
void bnxt_free_def_cp_ring(struct bnxt *bp);
void bnxt_init_def_ring_struct(struct bnxt *bp);
int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);

View File

@ -51,6 +51,7 @@ void bnxt_free_ring(struct bnxt_ring *ring)
memset((char *)*ring->vmem, 0, ring->vmem_size);
*ring->vmem = NULL;
}
rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
}
/*
@ -135,6 +136,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
tx_ring->mem_zone = (const void *)mz;
if (!tx_ring->bd)
return -ENOMEM;
@ -154,6 +156,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
(struct rx_prod_pkt_bd *)rx_ring->bd;
rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
rx_ring->mem_zone = (const void *)mz;
if (!rx_ring->bd)
return -ENOMEM;
@ -169,6 +172,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
cp_ring_info->cp_desc_ring = cp_ring->bd;
cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
cp_ring->mem_zone = (const void *)mz;
if (!cp_ring->bd)
return -ENOMEM;

View File

@ -76,6 +76,7 @@ struct bnxt_ring {
void **vmem;
uint16_t fw_ring_id; /* Ring id filled by Chimp FW */
const void *mem_zone;
};
struct bnxt_ring_grp_info {

View File

@ -53,8 +53,6 @@ void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
/* 'Unreserve' rte_memzone */
if (cpr->hw_stats)
cpr->hw_stats = NULL;
}
@ -272,10 +270,12 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_rx_queue *rxq;
int rc = 0;
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
return -EINVAL;
rc = -EINVAL;
goto out;
}
if (eth_dev->data->rx_queues) {
@ -287,14 +287,17 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
rxq->bp = bp;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
bnxt_init_rx_ring_struct(rxq);
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
goto out;
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
@ -307,8 +310,10 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
"rxr")) {
RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
return 0;
out:
return rc;
}

View File

@ -255,17 +255,20 @@ void bnxt_free_rx_rings(struct bnxt *bp)
if (!rxq)
continue;
/* TODO: free() rxq->rx_ring and rxq->rx_ring->rx_ring_struct */
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
/* TODO: free() rxq->cp_ring and rxq->cp_ring->cp_ring_struct */
rte_free(rxq->rx_ring->rx_ring_struct);
rte_free(rxq->rx_ring);
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
rte_free(rxq->cp_ring->cp_ring_struct);
rte_free(rxq->cp_ring);
rte_free(rxq);
bp->rx_queues[i] = NULL;
}
}
void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq)
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
struct bnxt *bp = rxq->bp;
struct bnxt_cp_ring_info *cpr;
@ -277,8 +280,19 @@ void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq)
(2 * VLAN_TAG_SIZE);
rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;
rxr = rte_zmalloc_socket("bnxt_rx_ring",
sizeof(struct bnxt_rx_ring_info),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxr == NULL)
return -ENOMEM;
rxq->rx_ring = rxr;
ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
sizeof(struct bnxt_ring),
RTE_CACHE_LINE_SIZE, socket_id);
if (ring == NULL)
return -ENOMEM;
rxr->rx_ring_struct = ring;
ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)rxr->rx_desc_ring;
@ -286,14 +300,27 @@ void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq)
ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
ring->vmem = (void **)&rxr->rx_buf_ring;
cpr = rxq->cp_ring;
ring = cpr->cp_ring_struct;
cpr = rte_zmalloc_socket("bnxt_rx_ring",
sizeof(struct bnxt_cp_ring_info),
RTE_CACHE_LINE_SIZE, socket_id);
if (cpr == NULL)
return -ENOMEM;
rxq->cp_ring = cpr;
ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
sizeof(struct bnxt_ring),
RTE_CACHE_LINE_SIZE, socket_id);
if (ring == NULL)
return -ENOMEM;
cpr->cp_ring_struct = ring;
ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->vmem_size = 0;
ring->vmem = NULL;
return 0;
}
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
@ -320,7 +347,6 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
/* TODO: These need to be allocated */
rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;
bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size);

View File

@ -56,7 +56,7 @@ struct bnxt_rx_ring_info {
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
#endif

View File

@ -49,8 +49,6 @@ void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
{
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
/* 'Unreserve' rte_memzone */
if (cpr->hw_stats)
cpr->hw_stats = NULL;
}
@ -108,10 +106,12 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_tx_queue *txq;
int rc = 0;
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
return -EINVAL;
rc = -EINVAL;
goto out;
}
if (eth_dev->data->tx_queues) {
@ -123,15 +123,18 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
}
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL) {
if (!txq) {
RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
txq->bp = bp;
txq->nb_tx_desc = nb_desc;
txq->tx_free_thresh = tx_conf->tx_free_thresh;
bnxt_init_tx_ring_struct(txq);
rc = bnxt_init_tx_ring_struct(txq, socket_id);
if (rc)
goto out;
txq->queue_id = queue_idx;
txq->port_id = eth_dev->data->port_id;
@ -141,15 +144,19 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
"txr")) {
RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
if (bnxt_init_one_tx_ring(txq)) {
RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
eth_dev->data->tx_queues[queue_idx] = txq;
return 0;
out:
return rc;
}

View File

@ -59,9 +59,12 @@ void bnxt_free_tx_rings(struct bnxt *bp)
continue;
bnxt_free_ring(txq->tx_ring->tx_ring_struct);
/* TODO: free() txq->tx_ring and txq->tx_ring->tx_ring_struct */
rte_free(txq->tx_ring->tx_ring_struct);
rte_free(txq->tx_ring);
bnxt_free_ring(txq->cp_ring->cp_ring_struct);
/* TODO: free() txq->cp_ring and txq->cp_ring->cp_ring_struct */
rte_free(txq->cp_ring->cp_ring_struct);
rte_free(txq->cp_ring);
rte_free(txq);
bp->tx_queues[i] = NULL;
@ -79,15 +82,25 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
return 0;
}
void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq)
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
struct bnxt_cp_ring_info *cpr;
struct bnxt_tx_ring_info *txr;
struct bnxt_ring *ring;
/* TODO: These need to be allocated */
txr = txq->tx_ring;
ring = txr->tx_ring_struct;
txr = rte_zmalloc_socket("bnxt_tx_ring",
sizeof(struct bnxt_tx_ring_info),
RTE_CACHE_LINE_SIZE, socket_id);
if (txr == NULL)
return -ENOMEM;
txq->tx_ring = txr;
ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
sizeof(struct bnxt_ring),
RTE_CACHE_LINE_SIZE, socket_id);
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
@ -95,15 +108,27 @@ void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq)
ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
ring->vmem = (void **)&txr->tx_buf_ring;
/* TODO: These need to be allocated */
cpr = txq->cp_ring;
ring = cpr->cp_ring_struct;
cpr = rte_zmalloc_socket("bnxt_tx_ring",
sizeof(struct bnxt_cp_ring_info),
RTE_CACHE_LINE_SIZE, socket_id);
if (cpr == NULL)
return -ENOMEM;
txq->cp_ring = cpr;
ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
sizeof(struct bnxt_ring),
RTE_CACHE_LINE_SIZE, socket_id);
if (ring == NULL)
return -ENOMEM;
cpr->cp_ring_struct = ring;
ring->ring_size = txr->tx_ring_struct->ring_size;
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->vmem_size = 0;
ring->vmem = NULL;
return 0;
}
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)

View File

@ -64,7 +64,7 @@ struct bnxt_sw_tx_bd {
void bnxt_free_tx_rings(struct bnxt *bp);
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);