net/bnxt: modify ring index logic
Change the ring logic so that the ring index increments unbounded and is masked only when needed. Modify the existing macros so that the index is not masked, and add a new macro, RING_IDX(), to mask it only when needed.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Lance Richardson <lance.richardson@broadcom.com>
parent 68e2bebf7b
commit c7de4195cc
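In outline, the change moves masking from every increment to the point of use. A minimal standalone sketch of the idea (the RING_ADV/RING_NEXT/RING_IDX bodies below are the ones this patch introduces; the old macro rename, the demo ring, and main() are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct bnxt_ring {
	uint32_t ring_mask;	/* ring_size - 1; ring sizes are powers of two */
};

/* Old scheme (renamed here for contrast): every advance was masked. */
#define OLD_RING_NEXT(ring, idx)	(((idx) + 1) & (ring)->ring_mask)

/* New scheme, as introduced by this patch: the raw index simply
 * increments; RING_IDX() applies the mask only at the point of use.
 */
#define RING_ADV(idx, n)	((idx) + (n))
#define RING_NEXT(idx)		RING_ADV(idx, 1)
#define RING_IDX(ring, idx)	((idx) & (ring)->ring_mask)

int main(void)
{
	struct bnxt_ring ring = { .ring_mask = 7 };	/* 8-entry ring */
	uint16_t raw_prod = 7;

	raw_prod = RING_NEXT(raw_prod);	/* 8: raw index keeps counting */
	printf("raw=%u slot=%u\n", raw_prod, RING_IDX(&ring, raw_prod));
	/* prints "raw=8 slot=0" -- masking happens only when indexing */
	return 0;
}
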
@@ -45,7 +45,7 @@ struct bnxt_db_info;
 } while (0)
 #define B_CP_DB_REARM(cpr, raw_cons) \
 	rte_write32((DB_CP_REARM_FLAGS | \
-		     RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
+		     DB_RING_IDX(&((cpr)->cp_db), raw_cons)), \
 		    ((cpr)->cp_db.doorbell))
 
 #define B_CP_DB_ARM(cpr)	rte_write32((DB_KEY_CP), \
@@ -65,8 +65,8 @@ struct bnxt_db_info;
 } while (0)
 #define B_CP_DIS_DB(cpr, raw_cons) \
 	rte_write32_relaxed((DB_CP_FLAGS | \
-			     RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
-			    ((cpr)->cp_db.doorbell))
+			     DB_RING_IDX(&((cpr)->cp_db), raw_cons)), \
+			    ((cpr)->cp_db.doorbell))
 
 #define B_CP_DB(cpr, raw_cons, ring_mask) \
 	rte_write32((DB_CP_FLAGS | \
@@ -80,8 +80,11 @@ struct bnxt_db_info {
 		uint32_t	db_key32;
 	};
 	bool			db_64;
+	uint32_t		db_ring_mask;
 };
 
+#define DB_RING_IDX(db, idx)	((idx) & (db)->db_ring_mask)
+
 struct bnxt_ring;
 struct bnxt_cp_ring_info {
 	uint32_t		cp_raw_cons;
@@ -95,7 +98,6 @@ struct bnxt_cp_ring_info {
 	uint32_t		hw_stats_ctx_id;
 
 	struct bnxt_ring	*cp_ring_struct;
-	uint16_t		cp_cons;
 	bool			valid;
 };
 
@@ -2601,8 +2601,8 @@ bnxt_free_all_hwrm_rings(struct bnxt *bp)
 			memset(txr->tx_buf_ring, 0,
 			       txr->tx_ring_struct->ring_size *
 			       sizeof(*txr->tx_buf_ring));
-			txr->tx_prod = 0;
-			txr->tx_cons = 0;
+			txr->tx_raw_prod = 0;
+			txr->tx_raw_cons = 0;
 		}
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
 			bnxt_free_cp_ring(bp, cpr);
 
@@ -55,12 +55,12 @@ bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
 	mask = rep_rxr->rx_ring_struct->ring_mask;
 
 	/* Put this mbuf on the RxQ of the Representor */
-	prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_prod & mask];
+	prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_raw_prod & mask];
 	if (*prod_rx_buf == NULL) {
 		*prod_rx_buf = mbuf;
 		vfr_bp->rx_bytes[que] += mbuf->pkt_len;
 		vfr_bp->rx_pkts[que]++;
-		rep_rxr->rx_prod++;
+		rep_rxr->rx_raw_prod++;
 	} else {
 		/* Representor Rx ring full, drop pkt */
 		vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
 
@@ -342,7 +342,8 @@ static void bnxt_set_db(struct bnxt *bp,
 			struct bnxt_db_info *db,
 			uint32_t ring_type,
 			uint32_t map_idx,
-			uint32_t fid)
+			uint32_t fid,
+			uint32_t ring_mask)
 {
 	if (BNXT_CHIP_P5(bp)) {
 		if (BNXT_PF(bp))
@@ -381,6 +382,7 @@ static void bnxt_set_db(struct bnxt *bp,
 		}
 		db->db_64 = false;
 	}
+	db->db_ring_mask = ring_mask;
 }
 
 static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
@@ -409,9 +411,9 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
 	if (rc)
 		return rc;
 
-	cpr->cp_cons = 0;
+	cpr->cp_raw_cons = 0;
 	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
-		    cp_ring->fw_ring_id);
+		    cp_ring->fw_ring_id, cp_ring->ring_mask);
 	bnxt_db_cq(cpr);
 
 	return 0;
@@ -472,7 +474,7 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
 	}
 
 	bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
-		    ring->fw_ring_id);
+		    ring->fw_ring_id, ring->ring_mask);
 	bnxt_db_nq(nqr);
 
 	bp->rxtx_nq_ring = nqr;
@@ -515,11 +517,12 @@ static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
 	if (rc)
 		return rc;
 
-	rxr->rx_prod = 0;
+	rxr->rx_raw_prod = 0;
 	if (BNXT_HAS_RING_GRPS(bp))
 		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
-	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
-	bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
+	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id,
+		    ring->ring_mask);
+	bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 
 	return 0;
 }
@@ -551,11 +554,12 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
 	if (rc)
 		return rc;
 
-	rxr->ag_prod = 0;
+	rxr->ag_raw_prod = 0;
 	if (BNXT_HAS_RING_GRPS(bp))
 		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
-	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
-	bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id,
+		    ring->ring_mask);
+	bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
 
 	return 0;
 }
@@ -604,8 +608,8 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 			rc = -ENOMEM;
 			goto err_out;
 		}
-		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
-		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
+		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
 	}
 	rxq->index = queue_index;
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
@@ -710,8 +714,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 				bnxt_rx_queue_release_op(rxq);
 				return -ENOMEM;
 			}
-			bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
-			bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+			bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
+			bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
 		}
 		rxq->index = i;
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 		bnxt_rxq_vec_setup(rxq);
@@ -744,7 +748,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 		if (rc)
 			goto err_out;
 
-		bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
+		bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id,
+			    ring->ring_mask);
 		txq->index = idx;
 		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
 	}
@@ -777,10 +782,10 @@ int bnxt_alloc_async_cp_ring(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	cpr->cp_cons = 0;
+	cpr->cp_raw_cons = 0;
 	cpr->valid = 0;
 	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
-		    cp_ring->fw_ring_id);
+		    cp_ring->fw_ring_id, cp_ring->ring_mask);
 
 	if (BNXT_HAS_NQ(bp))
 		bnxt_db_nq(cpr);
 
@@ -10,8 +10,9 @@
 
 #include <rte_memory.h>
 
-#define RING_ADV(ring, idx, n)	(((idx) + (n)) & (ring)->ring_mask)
-#define RING_NEXT(ring, idx)	RING_ADV(ring, idx, 1)
+#define RING_ADV(idx, n)	((idx) + (n))
+#define RING_NEXT(idx)		RING_ADV(idx, 1)
+#define RING_IDX(ring, idx)	((idx) & (ring)->ring_mask)
 
 #define DB_IDX_MASK		0xffffff
 #define DB_IDX_VALID		(0x1 << 26)
@@ -82,38 +83,51 @@ void bnxt_free_rxtx_nq_ring(struct bnxt *bp);
 
 static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
 {
-	if (db->db_64)
-		rte_write64(db->db_key64 | idx, db->doorbell);
-	else
-		rte_write32(db->db_key32 | idx, db->doorbell);
+	uint32_t db_idx = DB_RING_IDX(db, idx);
+	void *doorbell = db->doorbell;
+
+	if (db->db_64) {
+		uint64_t key_idx = db->db_key64 | db_idx;
+
+		rte_write64(key_idx, doorbell);
+	} else {
+		uint32_t key_idx = db->db_key32 | db_idx;
+
+		rte_write32(key_idx, doorbell);
+	}
 }
 
 /* Ring an NQ doorbell and disable interrupts for the ring. */
 static inline void bnxt_db_nq(struct bnxt_cp_ring_info *cpr)
 {
+	uint32_t db_idx = DB_RING_IDX(&cpr->cp_db, cpr->cp_raw_cons);
+	uint64_t key_idx = cpr->cp_db.db_key64 | DBR_TYPE_NQ | db_idx;
+	void *doorbell = cpr->cp_db.doorbell;
+
 	if (unlikely(!cpr->cp_db.db_64))
 		return;
 
-	rte_write64(cpr->cp_db.db_key64 | DBR_TYPE_NQ |
-		    RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),
-		    cpr->cp_db.doorbell);
+	rte_write64(key_idx, doorbell);
 }
 
 /* Ring an NQ doorbell and enable interrupts for the ring. */
 static inline void bnxt_db_nq_arm(struct bnxt_cp_ring_info *cpr)
 {
+	uint32_t db_idx = DB_RING_IDX(&cpr->cp_db, cpr->cp_raw_cons);
+	uint64_t key_idx = cpr->cp_db.db_key64 | DBR_TYPE_NQ_ARM | db_idx;
+	void *doorbell = cpr->cp_db.doorbell;
+
	if (unlikely(!cpr->cp_db.db_64))
		return;
 
-	rte_write64(cpr->cp_db.db_key64 | DBR_TYPE_NQ_ARM |
-		    RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),
-		    cpr->cp_db.doorbell);
+	rte_write64(key_idx, doorbell);
 }
 
 static inline void bnxt_db_cq(struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_db_info *db = &cpr->cp_db;
-	uint32_t idx = RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons);
+	uint32_t idx = DB_RING_IDX(&cpr->cp_db, cpr->cp_raw_cons);
 
 	if (db->db_64) {
 		uint64_t key_idx = db->db_key64 | idx;
 
@@ -39,12 +39,15 @@ static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
 
 static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
 				     struct bnxt_rx_ring_info *rxr,
-				     uint16_t prod)
+				     uint16_t raw_prod)
 {
-	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
-	struct rte_mbuf **rx_buf = &rxr->rx_buf_ring[prod];
+	uint16_t prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
+	struct rx_prod_pkt_bd *rxbd;
+	struct rte_mbuf **rx_buf;
 	struct rte_mbuf *mbuf;
 
+	rxbd = &rxr->rx_desc_ring[prod];
+	rx_buf = &rxr->rx_buf_ring[prod];
 	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
 	if (!mbuf) {
 		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
@@ -61,12 +64,15 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
 
 static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
 				     struct bnxt_rx_ring_info *rxr,
-				     uint16_t prod)
+				     uint16_t raw_prod)
 {
-	struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
-	struct rte_mbuf **rx_buf = &rxr->ag_buf_ring[prod];
+	uint16_t prod = RING_IDX(rxr->ag_ring_struct, raw_prod);
+	struct rx_prod_pkt_bd *rxbd;
+	struct rte_mbuf **rx_buf;
 	struct rte_mbuf *mbuf;
 
+	rxbd = &rxr->ag_desc_ring[prod];
+	rx_buf = &rxr->ag_buf_ring[prod];
 	if (rxbd == NULL) {
 		PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
 		return -EINVAL;
@@ -94,10 +100,11 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
 static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
 				      struct rte_mbuf *mbuf)
 {
-	uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
+	uint16_t prod, raw_prod = RING_NEXT(rxr->rx_raw_prod);
 	struct rte_mbuf **prod_rx_buf;
 	struct rx_prod_pkt_bd *prod_bd;
 
+	prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
 	prod_rx_buf = &rxr->rx_buf_ring[prod];
 
 	RTE_ASSERT(*prod_rx_buf == NULL);
@@ -109,7 +116,7 @@ static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
 
 	prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
-	rxr->rx_prod = prod;
+	rxr->rx_raw_prod = raw_prod;
 }
 
 static inline
@@ -119,7 +126,7 @@ struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
 	struct rte_mbuf **cons_rx_buf;
 	struct rte_mbuf *mbuf;
 
-	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	cons_rx_buf = &rxr->rx_buf_ring[RING_IDX(rxr->rx_ring_struct, cons)];
 	RTE_ASSERT(*cons_rx_buf != NULL);
 	mbuf = *cons_rx_buf;
 	*cons_rx_buf = NULL;
@@ -175,7 +182,7 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 
 	/* recycle next mbuf */
-	data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+	data_cons = RING_NEXT(data_cons);
 	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
 }
 
@@ -198,18 +205,20 @@ static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
 static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
 {
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-	uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+	uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
+	uint16_t bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
 
 	/* TODO batch allocation for better performance */
-	while (rte_bitmap_get(rxr->ag_bitmap, next)) {
-		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
-			PMD_DRV_LOG(ERR,
-				    "agg mbuf alloc failed: prod=0x%x\n", next);
+	while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
+		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next))) {
+			PMD_DRV_LOG(ERR, "agg mbuf alloc failed: prod=0x%x\n",
+				    raw_next);
 			break;
 		}
-		rte_bitmap_clear(rxr->ag_bitmap, next);
-		rxr->ag_prod = next;
-		next = RING_NEXT(rxr->ag_ring_struct, next);
+		rte_bitmap_clear(rxr->ag_bitmap, bmap_next);
+		rxr->ag_raw_prod = raw_next;
+		raw_next = RING_NEXT(raw_next);
+		bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
 	}
 
 	return 0;
@@ -666,7 +675,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	struct rx_pkt_cmpl *rxcmp;
 	struct rx_pkt_cmpl_hi *rxcmp1;
 	uint32_t tmp_raw_cons = *raw_cons;
-	uint16_t cons, prod, cp_cons =
+	uint16_t cons, raw_prod, cp_cons =
 		RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
 	struct rte_mbuf *mbuf;
 	int rc = 0;
@@ -726,7 +735,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
 		return -EBUSY;
 
-	prod = rxr->rx_prod;
+	raw_prod = rxr->rx_raw_prod;
 
 	cons = rxcmp->opaque;
 	mbuf = bnxt_consume_rx_buf(rxr, cons);
@@ -786,13 +795,14 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	 * calls in favour of a tight loop with the same function being called
 	 * in it.
 	 */
-	prod = RING_NEXT(rxr->rx_ring_struct, prod);
-	if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
-		PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
+	raw_prod = RING_NEXT(raw_prod);
+	if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
+		PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n",
+			    raw_prod);
 		rc = -ENOMEM;
 		goto rx;
 	}
-	rxr->rx_prod = prod;
+	rxr->rx_raw_prod = raw_prod;
 
 	if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
 	    vfr_flag) {
@@ -826,13 +836,13 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct bnxt_rx_queue *rxq = rx_queue;
 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	uint16_t rx_raw_prod = rxr->rx_raw_prod;
+	uint16_t ag_raw_prod = rxr->ag_raw_prod;
 	uint32_t raw_cons = cpr->cp_raw_cons;
 	uint32_t cons;
 	int nb_rx_pkts = 0;
 	int nb_rep_rx_pkts = 0;
 	struct rx_pkt_cmpl *rxcmp;
-	uint16_t prod = rxr->rx_prod;
-	uint16_t ag_prod = rxr->ag_prod;
 	int rc = 0;
 	bool evt = false;
 
@@ -850,8 +860,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	 */
 	while (unlikely(rxq->rxrearm_nb)) {
 		if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
-			rxr->rx_prod = rxq->rxrearm_start;
-			bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
+			rxr->rx_raw_prod = rxq->rxrearm_start;
+			bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 			rxq->rxrearm_start++;
 			rxq->rxrearm_nb--;
 		} else {
@@ -895,7 +905,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			break;
 		/* Post some Rx buf early in case of larger burst processing */
 		if (nb_rx_pkts == BNXT_RX_POST_THRESH)
-			bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
+			bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 	}
 
 	cpr->cp_raw_cons = raw_cons;
@@ -907,23 +917,27 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		goto done;
 	}
 
-	if (prod != rxr->rx_prod)
-		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
+	rte_compiler_barrier();
+	if (rx_raw_prod != rxr->rx_raw_prod)
+		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 
-	rte_compiler_barrier();
 	/* Ring the AGG ring DB */
-	if (ag_prod != rxr->ag_prod)
-		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
+	if (ag_raw_prod != rxr->ag_raw_prod)
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
 
 	bnxt_db_cq(cpr);
 
 	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
 	if (rc == -ENOMEM) {
-		int i = RING_NEXT(rxr->rx_ring_struct, prod);
+		int i = RING_NEXT(rx_raw_prod);
 		int cnt = nb_rx_pkts;
 
-		for (; cnt;
-			i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
-			struct rte_mbuf **rx_buf = &rxr->rx_buf_ring[i];
+		for (; nb_rx_pkts; i = RING_NEXT(i), cnt--) {
+			struct rte_mbuf **rx_buf;
+			uint16_t rx_raw_prod = RING_IDX(rxr->rx_ring_struct, i);
+
+			rx_buf = &rxr->rx_buf_ring[rx_raw_prod];
 
 			/* Buffer already allocated for this index. */
 			if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
@@ -931,8 +945,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 			/* This slot is empty. Alloc buffer for Rx */
 			if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
-				rxr->rx_prod = i;
-				bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
+				rxr->rx_raw_prod = i;
+				bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 			} else {
 				PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
 				break;
@@ -1100,7 +1114,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 {
 	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring *ring;
-	uint32_t prod, type;
+	uint32_t raw_prod, type;
 	unsigned int i;
 	uint16_t size;
 
@@ -1119,18 +1133,18 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 	ring = rxr->rx_ring_struct;
 	bnxt_init_rxbds(ring, type, size);
 
-	prod = rxr->rx_prod;
+	raw_prod = rxr->rx_raw_prod;
 	for (i = 0; i < ring->ring_size; i++) {
 		if (unlikely(!rxr->rx_buf_ring[i])) {
-			if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+			if (bnxt_alloc_rx_data(rxq, rxr, raw_prod) != 0) {
 				PMD_DRV_LOG(WARNING,
 					    "init'ed rx ring %d with %d/%d mbufs only\n",
 					    rxq->queue_id, i, ring->ring_size);
 				break;
 			}
 		}
-		rxr->rx_prod = prod;
-		prod = RING_NEXT(rxr->rx_ring_struct, prod);
+		rxr->rx_raw_prod = raw_prod;
+		raw_prod = RING_NEXT(raw_prod);
 	}
 
 	/* Initialize dummy mbuf pointers for vector mode rx. */
@@ -1142,19 +1156,19 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 	ring = rxr->ag_ring_struct;
 	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
 	bnxt_init_rxbds(ring, type, size);
-	prod = rxr->ag_prod;
+	raw_prod = rxr->ag_raw_prod;
 
 	for (i = 0; i < ring->ring_size; i++) {
 		if (unlikely(!rxr->ag_buf_ring[i])) {
-			if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+			if (bnxt_alloc_ag_data(rxq, rxr, raw_prod) != 0) {
 				PMD_DRV_LOG(WARNING,
 					    "init'ed AG ring %d with %d/%d mbufs only\n",
 					    rxq->queue_id, i, ring->ring_size);
 				break;
 			}
 		}
-		rxr->ag_prod = prod;
-		prod = RING_NEXT(rxr->ag_ring_struct, prod);
+		rxr->ag_raw_prod = raw_prod;
+		raw_prod = RING_NEXT(raw_prod);
 	}
 	PMD_DRV_LOG(DEBUG, "AGG Done!\n");
 
@@ -50,8 +50,8 @@ struct bnxt_tpa_info {
 };
 
 struct bnxt_rx_ring_info {
-	uint16_t		rx_prod;
-	uint16_t		ag_prod;
+	uint16_t		rx_raw_prod;
+	uint16_t		ag_raw_prod;
 	uint16_t		rx_cons; /* Needed for representor */
 	struct bnxt_db_info	rx_db;
 	struct bnxt_db_info	ag_db;
 
@@ -103,23 +103,23 @@ static inline void
 bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, int nr_pkts)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
 	struct rte_mbuf **free = txq->free;
-	uint16_t cons = txr->tx_cons;
+	uint16_t cons, raw_cons = txr->tx_raw_cons;
 	unsigned int blk = 0;
-	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
 
 	while (nr_pkts--) {
 		struct bnxt_sw_tx_bd *tx_buf;
 
+		cons = raw_cons++ & ring_mask;
 		tx_buf = &txr->tx_buf_ring[cons];
-		cons = (cons + 1) & ring_mask;
 		free[blk++] = tx_buf->mbuf;
 		tx_buf->mbuf = NULL;
 	}
 	if (blk)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
 
-	txr->tx_cons = cons;
+	txr->tx_raw_cons = raw_cons;
 }
 
 static inline void
@@ -127,7 +127,7 @@ bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
 	struct rte_mbuf **free = txq->free;
-	uint16_t cons = txr->tx_cons;
+	uint16_t cons, raw_cons = txr->tx_raw_cons;
 	unsigned int blk = 0;
 	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
 
@@ -135,8 +135,8 @@ bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
 		struct bnxt_sw_tx_bd *tx_buf;
 		struct rte_mbuf *mbuf;
 
+		cons = raw_cons++ & ring_mask;
 		tx_buf = &txr->tx_buf_ring[cons];
-		cons = (cons + 1) & ring_mask;
 		mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
 		if (unlikely(mbuf == NULL))
 			continue;
@@ -151,6 +151,6 @@ bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
 	if (blk)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
 
-	txr->tx_cons = cons;
+	txr->tx_raw_cons = raw_cons;
 }
 #endif /* _BNXT_RXTX_VEC_COMMON_H_ */
 
@@ -295,8 +295,7 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 out:
 	if (nb_rx_pkts) {
-		rxr->rx_prod =
-			RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);
+		rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);
 
 		rxq->rxrearm_nb += nb_rx_pkts;
 		cpr->cp_raw_cons += 2 * nb_rx_pkts;
@@ -353,7 +352,7 @@ bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
 	struct bnxt_tx_queue *txq = tx_queue;
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
-	uint16_t prod = txr->tx_prod;
+	uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
 	struct rte_mbuf *tx_mbuf;
 	struct tx_bd_long *txbd = NULL;
 	struct bnxt_sw_tx_bd *tx_buf;
@@ -370,16 +369,17 @@ bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_mbuf = *tx_pkts++;
 		rte_prefetch0(tx_mbuf);
 
-		tx_buf = &txr->tx_buf_ring[prod];
+		tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
+		tx_buf = &txr->tx_buf_ring[tx_prod];
 		tx_buf->mbuf = tx_mbuf;
 		tx_buf->nr_bds = 1;
 
-		txbd = &txr->tx_desc_ring[prod];
+		txbd = &txr->tx_desc_ring[tx_prod];
 		txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
 		txbd->len = tx_mbuf->data_len;
 		txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
 						       TX_BD_FLAGS_NOCMPL);
-		prod = RING_NEXT(txr->tx_ring_struct, prod);
+		tx_raw_prod = RING_NEXT(tx_raw_prod);
 		to_send--;
 	}
 
@@ -390,9 +390,9 @@ bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	}
 
 	rte_compiler_barrier();
-	bnxt_db_write(&txr->tx_db, prod);
+	bnxt_db_write(&txr->tx_db, tx_raw_prod);
 
-	txr->tx_prod = prod;
+	txr->tx_raw_prod = tx_raw_prod;
 
 	return nb_pkts;
 }
 
@@ -277,8 +277,7 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 out:
 	if (nb_rx_pkts) {
-		rxr->rx_prod =
-			RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);
+		rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);
 
 		rxq->rxrearm_nb += nb_rx_pkts;
 		cpr->cp_raw_cons += 2 * nb_rx_pkts;
@@ -351,11 +350,12 @@ bnxt_xmit_fixed_burst_vec(struct bnxt_tx_queue *txq, struct rte_mbuf **tx_pkts,
 			  uint16_t nb_pkts)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
-	uint16_t tx_prod = txr->tx_prod;
+	uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
 	struct tx_bd_long *txbd;
 	struct bnxt_sw_tx_bd *tx_buf;
 	uint16_t to_send;
 
+	tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
 	txbd = &txr->tx_desc_ring[tx_prod];
 	tx_buf = &txr->tx_buf_ring[tx_prod];
 
@@ -395,10 +395,10 @@ bnxt_xmit_fixed_burst_vec(struct bnxt_tx_queue *txq, struct rte_mbuf **tx_pkts,
 	txbd[-1].opaque = nb_pkts;
 	txbd[-1].flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
 
-	tx_prod = RING_ADV(txr->tx_ring_struct, tx_prod, nb_pkts);
-	bnxt_db_write(&txr->tx_db, tx_prod);
+	tx_raw_prod += nb_pkts;
+	bnxt_db_write(&txr->tx_db, tx_raw_prod);
 
-	txr->tx_prod = tx_prod;
+	txr->tx_raw_prod = tx_raw_prod;
 
 	return nb_pkts;
 }
@@ -435,8 +435,8 @@ bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * Ensure that a ring wrap does not occur within a call to
 		 * bnxt_xmit_fixed_burst_vec().
 		 */
-		num = RTE_MIN(num,
-			      ring_size - (txr->tx_prod & (ring_size - 1)));
+		num = RTE_MIN(num, ring_size -
+				   (txr->tx_raw_prod & (ring_size - 1)));
 		ret = bnxt_xmit_fixed_burst_vec(txq, &tx_pkts[nb_sent], num);
 		nb_sent += ret;
 		nb_pkts -= ret;
 
@@ -110,12 +110,14 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 				struct tx_bd_long **last_txbd)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct bnxt_ring *ring = txr->tx_ring_struct;
 	uint32_t outer_tpid_bd = 0;
 	struct tx_bd_long *txbd;
 	struct tx_bd_long_hi *txbd1 = NULL;
 	uint32_t vlan_tag_flags;
 	bool long_bd = false;
 	unsigned short nr_bds = 0;
+	uint16_t prod;
 	struct rte_mbuf *m_seg;
 	struct bnxt_sw_tx_bd *tx_buf;
 	static const uint32_t lhint_arr[4] = {
@@ -168,11 +170,12 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	/* Check non zero data_len */
 	RTE_VERIFY(tx_pkt->data_len);
 
-	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+	prod = RING_IDX(ring, txr->tx_raw_prod);
+	tx_buf = &txr->tx_buf_ring[prod];
 	tx_buf->mbuf = tx_pkt;
 	tx_buf->nr_bds = nr_bds;
 
-	txbd = &txr->tx_desc_ring[txr->tx_prod];
+	txbd = &txr->tx_desc_ring[prod];
 	txbd->opaque = *coal_pkts;
 	txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
 	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
@@ -210,10 +213,10 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
 		}
 
-		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+		txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);
 
-		txbd1 = (struct tx_bd_long_hi *)
-					&txr->tx_desc_ring[txr->tx_prod];
+		prod = RING_IDX(ring, txr->tx_raw_prod);
+		txbd1 = (struct tx_bd_long_hi *)&txr->tx_desc_ring[prod];
 		txbd1->lflags = 0;
 		txbd1->cfa_meta = vlan_tag_flags;
 		/* Legacy tx_bd_long_hi->mss =
@@ -318,11 +321,13 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	while (m_seg) {
 		/* Check non zero data_len */
 		RTE_VERIFY(m_seg->data_len);
-		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
-		tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+		txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);
+
+		prod = RING_IDX(ring, txr->tx_raw_prod);
+		tx_buf = &txr->tx_buf_ring[prod];
 		tx_buf->mbuf = m_seg;
 
-		txbd = &txr->tx_desc_ring[txr->tx_prod];
+		txbd = &txr->tx_desc_ring[prod];
 		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
 		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
 		txbd->len = m_seg->data_len;
@@ -332,7 +337,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 
 	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
 
-	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+	txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);
 
 	return 0;
 }
@@ -344,8 +349,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct bnxt_ring *ring = txr->tx_ring_struct;
 	struct rte_mbuf **free = txq->free;
-	uint16_t cons = txr->tx_cons;
+	uint16_t raw_cons = txr->tx_raw_cons;
 	unsigned int blk = 0;
 	int i, j;
 
@@ -353,7 +359,7 @@ static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
 		struct bnxt_sw_tx_bd *tx_buf;
 		unsigned short nr_bds;
 
-		tx_buf = &txr->tx_buf_ring[cons];
+		tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
 		nr_bds = tx_buf->nr_bds;
 		for (j = 0; j < nr_bds; j++) {
 			if (tx_buf->mbuf) {
@@ -361,35 +367,38 @@ static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
 				free[blk++] = tx_buf->mbuf;
 				tx_buf->mbuf = NULL;
 			}
-			cons = RING_NEXT(txr->tx_ring_struct, cons);
-			tx_buf = &txr->tx_buf_ring[cons];
+			raw_cons = RING_NEXT(raw_cons);
+			tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
 		}
 	}
 	if (blk)
 		rte_mempool_put_bulk(free[0]->pool, (void *)free, blk);
 
-	txr->tx_cons = cons;
+	txr->tx_raw_cons = raw_cons;
 }
 
 static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct bnxt_ring *ring = txr->tx_ring_struct;
 	struct rte_mempool *pool = NULL;
 	struct rte_mbuf **free = txq->free;
-	uint16_t cons = txr->tx_cons;
+	uint16_t raw_cons = txr->tx_raw_cons;
 	unsigned int blk = 0;
 	int i, j;
 
 	for (i = 0; i < nr_pkts; i++) {
 		struct rte_mbuf *mbuf;
-		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
-		unsigned short nr_bds = tx_buf->nr_bds;
+		struct bnxt_sw_tx_bd *tx_buf;
+		unsigned short nr_bds;
 
+		tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
+		nr_bds = tx_buf->nr_bds;
 		for (j = 0; j < nr_bds; j++) {
 			mbuf = tx_buf->mbuf;
 			tx_buf->mbuf = NULL;
-			cons = RING_NEXT(txr->tx_ring_struct, cons);
-			tx_buf = &txr->tx_buf_ring[cons];
+			raw_cons = RING_NEXT(raw_cons);
+			tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
 			if (!mbuf)	/* long_bd's tx_buf ? */
 				continue;
 
@@ -422,7 +431,7 @@ static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
 	if (blk)
 		rte_mempool_put_bulk(pool, (void *)free, blk);
 
-	txr->tx_cons = cons;
+	txr->tx_raw_cons = raw_cons;
 }
 
 static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
@@ -504,7 +513,7 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	if (likely(nb_tx_pkts)) {
 		/* Request a completion on the last packet */
 		last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
-		bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_prod);
+		bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_raw_prod);
 	}
 
 	return nb_tx_pkts;
 
@@ -12,8 +12,8 @@
 #define BNXT_MIN_PKT_SIZE	52
 
 struct bnxt_tx_ring_info {
-	uint16_t		tx_prod;
-	uint16_t		tx_cons;
+	uint16_t		tx_raw_prod;
+	uint16_t		tx_raw_cons;
 	struct bnxt_db_info	tx_db;
 
 	struct tx_bd_long	*tx_desc_ring;
@@ -31,7 +31,7 @@ struct bnxt_sw_tx_bd {
 
 static inline uint32_t bnxt_tx_bds_in_hw(struct bnxt_tx_queue *txq)
 {
-	return ((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
+	return ((txq->tx_ring->tx_raw_prod - txq->tx_ring->tx_raw_cons) &
 		txq->tx_ring->tx_ring_struct->ring_mask);
 }
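A closing note on correctness: because ring sizes are powers of two, the in-flight count computed by bnxt_tx_bds_in_hw() above stays correct even when the unbounded 16-bit raw indices wrap past 65535. A standalone sketch (the helper mirrors the expression from the patch; the names and fixed ring size are illustrative only):

#include <assert.h>
#include <stdint.h>

#define RING_SIZE	256u
#define RING_MASK	(RING_SIZE - 1)

/* Mirrors bnxt_tx_bds_in_hw(): unsigned subtraction of the raw
 * producer and consumer indices, masked to the ring size. Valid as
 * long as at most RING_SIZE descriptors are outstanding, which the
 * driver guarantees.
 */
static uint16_t bds_in_hw(uint16_t raw_prod, uint16_t raw_cons)
{
	return (uint16_t)(raw_prod - raw_cons) & RING_MASK;
}

int main(void)
{
	assert(bds_in_hw(110, 100) == 10);	/* no wrap: 10 outstanding */
	assert(bds_in_hw(4, 65530) == 10);	/* wrapped past 65535: still 10 */
	return 0;
}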