net/bnxt: reduce CQ queue size without aggregation ring
Don't allocate extra completion queue entries for aggregation ring
when aggregation ring will not be used.

Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
commit 04067844a3
parent cec43bbf38
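The effect of the change is easiest to see with concrete numbers. The following standalone sketch (not driver code) contrasts the old sizing, which always reserved aggregation-ring completions, with the new one; the descriptor count and the stand-in value for AGG_RING_SIZE_FACTOR are illustrative assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Round up to the next power of two, mimicking rte_align32pow2(). */
static uint32_t align32pow2(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t rx_ring_size = 256; /* illustrative rx descriptor count */
	uint32_t agg_factor = 2;     /* stand-in for AGG_RING_SIZE_FACTOR */
	bool use_agg_ring = false;   /* no scatter/LRO, frames fit one mbuf */
	uint32_t old_cq, new_cq;

	/* Old: CQ always sized for rx plus aggregation completions. */
	old_cq = align32pow2(rx_ring_size * (2 + agg_factor));

	/* New: two completion slots per rx descriptor ... */
	new_cq = rx_ring_size * 2;
	/* ... and the aggregation share only when the agg ring is used. */
	if (use_agg_ring)
		new_cq *= agg_factor;
	new_cq = align32pow2(new_cq);

	printf("old CQ size: %u, new CQ size: %u\n", old_cq, new_cq);
	return 0; /* prints: old CQ size: 1024, new CQ size: 512 */
}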
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1295,6 +1295,8 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	eth_dev->data->dev_started = 0;
+	eth_dev->data->scattered_rx = 0;
+
 	/* Prevent crashes when queues are still in use */
 	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
 	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
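Resetting scattered_rx when the port stops matters because bnxt_mtu_set_op() below now trusts that flag instead of probing burst-function pointers. A minimal sketch of the call sequence the driver's own error message asks applications to follow, using standard ethdev APIs (the helper name, port_id, and new_mtu are illustrative, not DPDK API):

#include <rte_ethdev.h>

/* Stop the port, change the MTU, then restart -- the order the
 * "Stop port before changing MTU." error message asks for.
 */
static int change_mtu_stopped(uint16_t port_id, uint16_t new_mtu)
{
	int ret;

	rte_eth_dev_stop(port_id); /* bnxt_dev_stop_op() clears scattered_rx */

	ret = rte_eth_dev_set_mtu(port_id, new_mtu);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}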
@@ -2693,14 +2695,12 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 	new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
 		       VLAN_TAG_SIZE * BNXT_NUM_VLANS;
 
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 	/*
-	 * If vector-mode tx/rx is active, disallow any MTU change that would
-	 * require scattered receive support.
+	 * Disallow any MTU change that would require scattered receive support
+	 * if it is not already enabled.
 	 */
 	if (eth_dev->data->dev_started &&
-	    (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
-	     eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
+	    !eth_dev->data->scattered_rx &&
 	    (new_pkt_size >
 	     eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
 		PMD_DRV_LOG(ERR,
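To put numbers on the check: RTE_ETHER_HDR_LEN is 14 and RTE_ETHER_CRC_LEN is 4, and assuming bnxt's usual VLAN_TAG_SIZE of 4 and BNXT_NUM_VLANS of 2, a new_mtu of 1500 yields new_pkt_size = 1500 + 14 + 4 + 8 = 1526 bytes. The request is rejected only when the port is running, scattered receive is off, and that size exceeds min_rx_buf_size - RTE_PKTMBUF_HEADROOM.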
@@ -2708,7 +2708,6 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 		PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
 		return -EINVAL;
 	}
-#endif
 
 	if (new_mtu > RTE_ETHER_MTU) {
 		bp->flags |= BNXT_FLAG_JUMBO;
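For reference, RTE_ETHER_MTU is 1500, so any MTU above the standard Ethernet maximum marks the port as jumbo here; jumbo frames are one of the cases that can later force scattered receive and, with it, the aggregation ring.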
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -938,9 +938,12 @@ void bnxt_free_rx_rings(struct bnxt *bp)
 
 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 {
+	struct rte_eth_dev *eth_dev = rxq->bp->eth_dev;
+	struct rte_eth_rxmode *rxmode;
 	struct bnxt_cp_ring_info *cpr;
 	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring *ring;
+	bool use_agg_ring;
 
 	rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
 
@@ -978,8 +981,22 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 	if (ring == NULL)
 		return -ENOMEM;
 	cpr->cp_ring_struct = ring;
-	ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
-					  (2 + AGG_RING_SIZE_FACTOR));
+
+	rxmode = &eth_dev->data->dev_conf.rxmode;
+	use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
+		       (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
+		       (rxmode->max_rx_pkt_len >
+			(uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+				   RTE_PKTMBUF_HEADROOM));
+
+	/* Allocate two completion slots per entry in desc ring. */
+	ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+
+	/* Allocate additional slots if aggregation ring is in use. */
+	if (use_agg_ring)
+		ring->ring_size *= AGG_RING_SIZE_FACTOR;
+
+	ring->ring_size = rte_align32pow2(ring->ring_size);
 	ring->ring_mask = ring->ring_size - 1;
 	ring->bd = (void *)cpr->cp_desc_ring;
 	ring->bd_dma = cpr->cp_desc_mapping;
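The use_agg_ring test above is the crux of the patch: the aggregation ring is needed only for scattered receive, LRO, or frames too large for a single mbuf. A standalone restatement of that predicate against the DPDK API of this era (the DEV_RX_OFFLOAD_* macros and max_rx_pkt_len were renamed in later releases; the helper name is mine, not driver API):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Same three conditions the driver checks: scatter offload, LRO offload,
 * or a max frame size that cannot fit in one mbuf's data room.
 */
static bool rxq_needs_agg_ring(const struct rte_eth_rxmode *rxmode,
			       struct rte_mempool *mb_pool)
{
	uint32_t room = (uint32_t)(rte_pktmbuf_data_room_size(mb_pool) -
				   RTE_PKTMBUF_HEADROOM);

	return (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
	       (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
	       (rxmode->max_rx_pkt_len > room);
}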