net/axgbe: support segmented Tx

Enable segmented Tx support so that packets spanning multiple mbufs, including jumbo frames, can be transmitted.

Signed-off-by: Bhagyada Modali <bhagyada.modali@amd.com>
Acked-by: Chandubabu Namburu <chandu@amd.com>
Author: Bhagyada Modali <bhagyada.modali@amd.com>
Date: 2022-09-08 14:15:03 -04:00
Committed by: Andrew Rybchenko
parent 231435a5e6
commit 93bffd8fa0
4 changed files with 218 additions and 1 deletion
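The new RTE_ETH_TX_OFFLOAD_MULTI_SEGS capability advertised below is consumed on the application side through the port's Tx mode offloads. As a rough illustration (this snippet is not part of the commit; the function name, error handling and single-queue counts are placeholders), a port could be configured to use the segmented Tx path like this:

#include <rte_ethdev.h>

/* Illustrative only: request multi-segment Tx before configuring the port.
 * Assumes the EAL is already initialized and port_id is a valid axgbe port.
 */
static int
configure_port_with_multi_seg_tx(uint16_t port_id)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf = { 0 };
    int ret;

    ret = rte_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0)
        return ret;

    /* Request multi-segment Tx only if the PMD reports the capability */
    if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
        port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

    /* One Rx and one Tx queue for brevity */
    return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}

Once the offload bit is set in dev_conf.txmode.offloads, axgbe_dev_tx_queue_setup() in the patch below selects axgbe_xmit_pkts_seg as the Tx burst function.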


@@ -1228,6 +1228,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
         RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
         RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+        RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
         RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
         RTE_ETH_TX_OFFLOAD_TCP_CKSUM;


@@ -582,6 +582,7 @@ struct axgbe_port {
     unsigned int tx_pbl;
     unsigned int tx_osp_mode;
     unsigned int tx_max_fifo_size;
+    unsigned int multi_segs_tx;

     /* Rx settings */
     unsigned int rx_sf_mode;


@@ -557,6 +557,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
     unsigned int tsize;
     const struct rte_memzone *tz;
     uint64_t offloads;
+    struct rte_eth_dev_data *dev_data = dev->data;

     tx_desc = nb_desc;
     pdata = dev->data->dev_private;
@@ -624,7 +625,13 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
     if (!pdata->tx_queues)
         pdata->tx_queues = dev->data->tx_queues;

-    if (txq->vector_disable ||
+    if ((dev_data->dev_conf.txmode.offloads &
+            RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
+        pdata->multi_segs_tx = true;
+
+    if (pdata->multi_segs_tx)
+        dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
+    else if (txq->vector_disable ||
             rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
         dev->tx_pkt_burst = &axgbe_xmit_pkts;
     else
@@ -775,6 +782,29 @@ void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
     AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
 }

+/* Free Tx conformed mbufs segments */
+static void
+axgbe_xmit_cleanup_seg(struct axgbe_tx_queue *txq)
+{
+    volatile struct axgbe_tx_desc *desc;
+    uint16_t idx;
+
+    idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+    while (txq->cur != txq->dirty) {
+        if (unlikely(idx == txq->nb_desc))
+            idx = 0;
+        desc = &txq->desc[idx];
+        /* Check for ownership */
+        if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+            return;
+        memset((void *)&desc->desc2, 0, 8);
+        /* Free mbuf */
+        rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+        txq->sw_ring[idx++] = NULL;
+        txq->dirty++;
+    }
+}
+
 /* Free Tx conformed mbufs */
 static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
 {
@@ -867,6 +897,187 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
     return 0;
 }

+/* Tx Descriptor formation for segmented mbuf
+ * Each mbuf will require multiple descriptors
+ */
+static int
+axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
+        struct rte_mbuf *mbuf)
+{
+    volatile struct axgbe_tx_desc *desc;
+    uint16_t idx;
+    uint64_t mask;
+    int start_index;
+    uint32_t pkt_len = 0;
+    int nb_desc_free;
+    struct rte_mbuf *tx_pkt;
+
+    nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+    if (mbuf->nb_segs > nb_desc_free) {
+        axgbe_xmit_cleanup_seg(txq);
+        nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+        if (unlikely(mbuf->nb_segs > nb_desc_free))
+            return RTE_ETH_TX_DESC_UNAVAIL;
+    }
+
+    idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+    desc = &txq->desc[idx];
+    /* Saving the start index for setting the OWN bit finally */
+    start_index = idx;
+
+    tx_pkt = mbuf;
+    /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
+    pkt_len = tx_pkt->pkt_len;
+
+    /* Update buffer address and length */
+    desc->baddr = rte_mbuf_data_iova(tx_pkt);
+    AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+            tx_pkt->data_len);
+    /* Total msg length to transmit */
+    AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+            tx_pkt->pkt_len);
+    /* Timestamp enablement check */
+    if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+        AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+    rte_wmb();
+
+    /* Mark it as First Descriptor */
+    AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+    /* Mark it as a NORMAL descriptor */
+    AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+    /* configure h/w Offload */
+    mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+    if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+    else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+    rte_wmb();
+
+    if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+        /* Mark it as a CONTEXT descriptor */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                CTXT, 1);
+        /* Set the VLAN tag */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                VT, mbuf->vlan_tci);
+        /* Indicate this descriptor contains the VLAN tag */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                VLTV, 1);
+        AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+                TX_NORMAL_DESC2_VLAN_INSERT);
+    } else {
+        AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+    }
+    rte_wmb();
+
+    /* Save mbuf */
+    txq->sw_ring[idx] = tx_pkt;
+    /* Update current index*/
+    txq->cur++;
+
+    tx_pkt = tx_pkt->next;
+
+    while (tx_pkt != NULL) {
+        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+        desc = &txq->desc[idx];
+
+        /* Update buffer address and length */
+        desc->baddr = rte_mbuf_data_iova(tx_pkt);
+        AXGMAC_SET_BITS_LE(desc->desc2,
+                TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+
+        rte_wmb();
+
+        /* Mark it as a NORMAL descriptor */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+        /* configure h/w Offload */
+        mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+        if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+                mask == RTE_MBUF_F_TX_UDP_CKSUM)
+            AXGMAC_SET_BITS_LE(desc->desc3,
+                    TX_NORMAL_DESC3, CIC, 0x3);
+        else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+            AXGMAC_SET_BITS_LE(desc->desc3,
+                    TX_NORMAL_DESC3, CIC, 0x1);
+
+        rte_wmb();
+
+        /* Set OWN bit */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+        rte_wmb();
+
+        /* Save mbuf */
+        txq->sw_ring[idx] = tx_pkt;
+        /* Update current index*/
+        txq->cur++;
+
+        tx_pkt = tx_pkt->next;
+    }
+
+    /* Set LD bit for the last descriptor */
+    AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+    rte_wmb();
+
+    /* Update stats */
+    txq->bytes += pkt_len;
+
+    /* Set OWN bit for the first descriptor */
+    desc = &txq->desc[start_index];
+    AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+    rte_wmb();
+
+    return 0;
+}
+
+/* Eal supported tx wrapper- Segmented*/
+uint16_t
+axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+        uint16_t nb_pkts)
+{
+    PMD_INIT_FUNC_TRACE();
+
+    struct axgbe_tx_queue *txq;
+    uint16_t nb_desc_free;
+    uint16_t nb_pkt_sent = 0;
+    uint16_t idx;
+    uint32_t tail_addr;
+    struct rte_mbuf *mbuf = NULL;
+
+    if (unlikely(nb_pkts == 0))
+        return nb_pkts;
+
+    txq = (struct axgbe_tx_queue *)tx_queue;
+
+    nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+    if (unlikely(nb_desc_free <= txq->free_thresh)) {
+        axgbe_xmit_cleanup_seg(txq);
+        nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+        if (unlikely(nb_desc_free == 0))
+            return 0;
+    }
+
+    while (nb_pkts--) {
+        mbuf = *tx_pkts++;
+
+        if (axgbe_xmit_hw_seg(txq, mbuf))
+            goto out;
+        nb_pkt_sent++;
+    }
+out:
+    /* Sync read and write */
+    rte_mb();
+    idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+    tail_addr = low32_value(txq->ring_phys_addr +
+            idx * sizeof(struct axgbe_tx_desc));
+
+    /* Update tail reg with next immediate address to kick Tx DMA channel*/
+    AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+    txq->pkts += nb_pkt_sent;
+    return nb_pkt_sent;
+}
+
 /* Eal supported tx wrapper*/
 uint16_t
 axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,


@@ -173,6 +173,10 @@ int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
 uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         uint16_t nb_pkts);

+uint16_t axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+        uint16_t nb_pkts);
+
 uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
         uint16_t nb_pkts);
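
To exercise the new path, a packet larger than a single mbuf data room (for example a jumbo frame) is handed to rte_eth_tx_burst() as a chained mbuf; axgbe_xmit_pkts_seg() then consumes one descriptor per segment, as implemented above. The following sketch is illustrative only and not part of the commit; the mempool, segment sizes and port/queue numbers are assumptions:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative only: build a two-segment mbuf and transmit it. Assumes the
 * port was configured and started with RTE_ETH_TX_OFFLOAD_MULTI_SEGS enabled
 * and that mb_pool provides at least 1024 bytes of data room per mbuf.
 */
static int
send_two_segment_packet(uint16_t port_id, uint16_t queue_id,
        struct rte_mempool *mb_pool)
{
    struct rte_mbuf *head = rte_pktmbuf_alloc(mb_pool);
    struct rte_mbuf *tail = rte_pktmbuf_alloc(mb_pool);

    if (head == NULL || tail == NULL)
        goto err;

    /* Reserve payload space in each segment; contents omitted here */
    if (rte_pktmbuf_append(head, 1024) == NULL ||
            rte_pktmbuf_append(tail, 1024) == NULL)
        goto err;

    /* Link the segments; pkt_len and nb_segs of head are updated */
    if (rte_pktmbuf_chain(head, tail) != 0)
        goto err;

    /* One Tx descriptor per segment is consumed on the ring */
    if (rte_eth_tx_burst(port_id, queue_id, &head, 1) == 1)
        return 0;

    rte_pktmbuf_free(head); /* frees the whole chain */
    return -1;

err:
    rte_pktmbuf_free(head);
    rte_pktmbuf_free(tail);
    return -1;
}

rte_pktmbuf_chain() keeps nb_segs and pkt_len up to date on the head mbuf, which axgbe_xmit_hw_seg() relies on when checking free descriptors and when writing the total frame length (FL) into the first descriptor.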