net/nfp: add NFDk Tx

Implement the packet transmit function for NFP3800 cards running
firmware with the NFDk datapath.

Signed-off-by: Jin Liu <jin.liu@corigine.com>
Signed-off-by: Diana Wang <na.wang@corigine.com>
Signed-off-by: Peng Zhang <peng.zhang@corigine.com>
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Signed-off-by: Niklas Söderlund <niklas.soderlund@corigine.com>
Commit: c73dced48c (parent: 52aab95476)
Author: Jin Liu, 2022-06-23 04:26:14 +02:00
Committed-by: Ferruh Yigit
4 changed files with 290 additions and 2 deletions

File: drivers/net/nfp/nfp_ethdev.c

@@ -377,6 +377,7 @@ nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
     switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
     case NFP_NET_CFG_VERSION_DP_NFD3:
         eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops;
+        eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
         break;
     case NFP_NET_CFG_VERSION_DP_NFDK:
         if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
@@ -385,6 +386,7 @@ nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
             return -EINVAL;
         }
         eth_dev->dev_ops = &nfp_net_nfdk_eth_dev_ops;
+        eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
         break;
     default:
         PMD_DRV_LOG(ERR, "The version of firmware is not correct.");
@@ -393,7 +395,6 @@ nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)

     eth_dev->rx_queue_count = nfp_net_rx_queue_count;
     eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
-    eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;

     return 0;
 }
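The point of this hunk (and its VF counterpart below) is that each firmware
datapath class now mounts its own Tx burst function alongside its dev_ops,
instead of unconditionally installing the NFD3 burst after the switch.
Applications are unaffected: whichever function is mounted is reached through
the usual ethdev burst call. A minimal usage sketch; the port/queue ids and
the mempool are assumptions for illustration, not part of this patch:

    /* Send one packet through whichever burst the PMD mounted. */
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static uint16_t
    send_one(struct rte_mempool *pool, uint16_t port_id, uint16_t queue_id)
    {
        struct rte_mbuf *m = rte_pktmbuf_alloc(pool);

        if (m == NULL)
            return 0;
        /* ... fill in frame data and m->data_len / m->pkt_len ... */
        uint16_t sent = rte_eth_tx_burst(port_id, queue_id, &m, 1);
        if (sent == 0)
            rte_pktmbuf_free(m); /* not queued; ownership stays with us */
        return sent;
    }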

File: drivers/net/nfp/nfp_ethdev_vf.c

@@ -282,6 +282,7 @@ nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
     switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
     case NFP_NET_CFG_VERSION_DP_NFD3:
         eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
+        eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
         break;
     case NFP_NET_CFG_VERSION_DP_NFDK:
         if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
@@ -290,6 +291,7 @@ nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
             return -EINVAL;
         }
         eth_dev->dev_ops = &nfp_netvf_nfdk_eth_dev_ops;
+        eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
         break;
     default:
         PMD_DRV_LOG(ERR, "The version of firmware is not correct.");
@@ -298,7 +300,6 @@ nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)

     eth_dev->rx_queue_count = nfp_net_rx_queue_count;
     eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
-    eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;

     return 0;
 }

File: drivers/net/nfp/nfp_rxtx.c

@@ -20,6 +20,9 @@
 #include "nfp_rxtx.h"
 #include "nfp_logs.h"
 #include "nfp_ctrl.h"
+#include "nfpcore/nfp_mip.h"
+#include "nfpcore/nfp_rtsym.h"
+#include "nfpcore/nfp-common/nfp_platform.h"

 static int
 nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
@@ -1104,3 +1107,283 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,

     return 0;
 }
+
+static inline uint32_t
+nfp_net_nfdk_free_tx_desc(struct nfp_net_txq *txq)
+{
+    uint32_t free_desc;
+
+    if (txq->wr_p >= txq->rd_p)
+        free_desc = txq->tx_count - (txq->wr_p - txq->rd_p);
+    else
+        free_desc = txq->rd_p - txq->wr_p;
+
+    return (free_desc > NFDK_TX_DESC_STOP_CNT) ?
+            (free_desc - NFDK_TX_DESC_STOP_CNT) : 0;
+}
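nfp_net_nfdk_free_tx_desc() is plain ring arithmetic on the write and read
pointers, with NFDK_TX_DESC_STOP_CNT slots held back so the producer can
never catch up with the consumer. A standalone sketch of the same math; the
ring size and stop count are assumed values for illustration (the driver
derives the stop count from its 32-descriptor block geometry):

    #include <stdio.h>

    #define TX_COUNT 1024u  /* assumed ring size */
    #define STOP_CNT 64u    /* assumed reserved slots */

    static unsigned int
    free_tx_desc(unsigned int wr_p, unsigned int rd_p)
    {
        unsigned int free_desc = (wr_p >= rd_p) ?
            TX_COUNT - (wr_p - rd_p) : /* writer ahead, no wrap in between */
            rd_p - wr_p;               /* writer already wrapped around */

        return free_desc > STOP_CNT ? free_desc - STOP_CNT : 0;
    }

    int main(void)
    {
        printf("%u\n", free_tx_desc(100, 40));  /* 1024 - 60 - 64 = 900 */
        printf("%u\n", free_tx_desc(10, 1000)); /* 990 - 64 = 926 */
        return 0;
    }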
+static inline uint32_t
+nfp_net_nfdk_txq_full(struct nfp_net_txq *txq)
+{
+    return (nfp_net_nfdk_free_tx_desc(txq) < txq->tx_free_thresh);
+}
+
+static inline int
+nfp_net_nfdk_headlen_to_segs(unsigned int headlen)
+{
+    return DIV_ROUND_UP(headlen +
+            NFDK_TX_MAX_DATA_PER_DESC -
+            NFDK_TX_MAX_DATA_PER_HEAD,
+            NFDK_TX_MAX_DATA_PER_DESC);
+}
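The first mbuf is special: its first NFDK_TX_MAX_DATA_PER_HEAD bytes ride in
the head descriptor and the remainder spills into gather descriptors of up to
NFDK_TX_MAX_DATA_PER_DESC bytes each. Adding the difference of the two limits
before a single DIV_ROUND_UP folds both into one ceiling division. A
self-checking sketch, assuming the usual 4 KB head and 16 KB gather limits:

    #include <assert.h>

    #define MAX_PER_HEAD 4096u   /* assumed head descriptor limit */
    #define MAX_PER_DESC 16384u  /* assumed gather descriptor limit */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int
    headlen_to_segs(unsigned int headlen)
    {
        /* one head descriptor plus ceil((headlen - 4K) / 16K) gather descs */
        return DIV_ROUND_UP(headlen + MAX_PER_DESC - MAX_PER_HEAD, MAX_PER_DESC);
    }

    int main(void)
    {
        assert(headlen_to_segs(1000) == 1);  /* fits in the head descriptor */
        assert(headlen_to_segs(9000) == 2);  /* 4 KB head + one gather desc */
        assert(headlen_to_segs(40000) == 4); /* 4 KB head + three gather descs */
        return 0;
    }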
+static int
+nfp_net_nfdk_tx_maybe_close_block(struct nfp_net_txq *txq, struct rte_mbuf *pkt)
+{
+    unsigned int n_descs, wr_p, i, nop_slots;
+    struct rte_mbuf *pkt_temp;
+
+    pkt_temp = pkt;
+    n_descs = nfp_net_nfdk_headlen_to_segs(pkt_temp->data_len);
+    while (pkt_temp->next) {
+        pkt_temp = pkt_temp->next;
+        n_descs += DIV_ROUND_UP(pkt_temp->data_len, NFDK_TX_MAX_DATA_PER_DESC);
+    }
+
+    if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX))
+        return -EINVAL;
+
+    n_descs += !!(pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
+
+    if (round_down(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+            round_down(txq->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
+        goto close_block;
+
+    if ((uint32_t)txq->data_pending + pkt->pkt_len > NFDK_TX_MAX_DATA_PER_BLOCK)
+        goto close_block;
+
+    return 0;
+
+close_block:
+    wr_p = txq->wr_p;
+    nop_slots = D_BLOCK_CPL(wr_p);
+
+    memset(&txq->ktxds[wr_p], 0, nop_slots * sizeof(struct nfp_net_nfdk_tx_desc));
+    for (i = wr_p; i < nop_slots + wr_p; i++) {
+        if (txq->txbufs[i].mbuf) {
+            rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+            txq->txbufs[i].mbuf = NULL;
+        }
+    }
+
+    txq->data_pending = 0;
+    txq->wr_p = D_IDX(txq, txq->wr_p + nop_slots);
+
+    return nop_slots;
+}
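NFDk lays the Tx ring out in fixed-size descriptor blocks. A packet's
descriptor chain may not straddle a block boundary, and one block may carry
at most NFDK_TX_MAX_DATA_PER_BLOCK bytes of payload (tracked in
txq->data_pending); when either rule would be broken, the rest of the current
block is padded with zeroed NOP descriptors and the packet starts at the next
boundary. The caller accounts the returned nop_slots as issued descriptors.
A sketch of the boundary test, assuming 32 descriptors per block and a
D_BLOCK_CPL() that yields the slots left before the next boundary:

    #include <stdio.h>

    #define BLOCK_CNT 32u  /* assumed descriptors per block */

    #define ROUND_DOWN(x, y) ((x) / (y) * (y))
    #define D_BLOCK_CPL(x)   (BLOCK_CNT - ((x) % BLOCK_CNT))

    int main(void)
    {
        unsigned int wr_p = 28, n_descs = 6;

        if (ROUND_DOWN(wr_p, BLOCK_CNT) != ROUND_DOWN(wr_p + n_descs, BLOCK_CNT))
            /* chain would occupy slots 28..33: pad 28..31 with NOPs */
            printf("pad %u nop slots, resume at %u\n",
                    D_BLOCK_CPL(wr_p), wr_p + D_BLOCK_CPL(wr_p));
        return 0;
    }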
+static inline uint64_t
+nfp_net_nfdk_tx_cksum(struct nfp_net_txq *txq, struct rte_mbuf *mb,
+        uint64_t flags)
+{
+    uint64_t ol_flags;
+    struct nfp_net_hw *hw = txq->hw;
+
+    if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
+        return flags;
+
+    ol_flags = mb->ol_flags;
+
+    /* IPv6 does not need checksum */
+    if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+        flags |= NFDK_DESC_TX_L3_CSUM;
+
+    if (ol_flags & RTE_MBUF_F_TX_L4_MASK)
+        flags |= NFDK_DESC_TX_L4_CSUM;
+
+    return flags;
+}
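The checksum helper only maps mbuf offload flags onto NFDk descriptor bits;
arming those flags is the application's job. A sketch of how a sender does
that, assuming an IPv4/TCP frame and a port configured with the matching
RTE_ETH_TX_OFFLOAD_* capabilities:

    #include <rte_mbuf.h>

    void
    request_tx_csum(struct rte_mbuf *m)
    {
        m->l2_len = 14;                          /* Ethernet header */
        m->l3_len = 20;                          /* IPv4 header, no options */
        m->ol_flags |= RTE_MBUF_F_TX_IPV4 |
                RTE_MBUF_F_TX_IP_CKSUM |         /* -> NFDK_DESC_TX_L3_CSUM */
                RTE_MBUF_F_TX_TCP_CKSUM;         /* -> NFDK_DESC_TX_L4_CSUM */
    }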
+static inline uint64_t
+nfp_net_nfdk_tx_tso(struct nfp_net_txq *txq, struct rte_mbuf *mb)
+{
+    uint64_t ol_flags;
+    struct nfp_net_nfdk_tx_desc txd;
+    struct nfp_net_hw *hw = txq->hw;
+
+    if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
+        goto clean_txd;
+
+    ol_flags = mb->ol_flags;
+
+    if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
+        goto clean_txd;
+
+    txd.l3_offset = mb->l2_len;
+    txd.l4_offset = mb->l2_len + mb->l3_len;
+    txd.lso_meta_res = 0;
+    txd.mss = rte_cpu_to_le_16(mb->tso_segsz);
+    txd.lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+    txd.lso_totsegs = (mb->pkt_len + mb->tso_segsz) / mb->tso_segsz;
+
+    return txd.raw;
+
+clean_txd:
+    txd.l3_offset = 0;
+    txd.l4_offset = 0;
+    txd.lso_hdrlen = 0;
+    txd.mss = 0;
+    txd.lso_totsegs = 0;
+    txd.lso_meta_res = 0;
+
+    return txd.raw;
+}
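For TSO packets the helper returns a full descriptor's worth of LSO metadata
as one 64-bit word via the union's raw view, which the caller drops straight
into the ring. An illustrative stand-in for that union trick; the field
widths here are assumptions for the sketch, not the device ABI:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tso_desc {
        union {
            struct {
                uint8_t  l3_offset;   /* start of the L3 header */
                uint8_t  l4_offset;   /* start of the L4 header */
                uint16_t lso_meta_res;
                uint16_t mss;
                uint8_t  lso_hdrlen;  /* L2 + L3 + L4 header bytes */
                uint8_t  lso_totsegs; /* resulting segment count */
            };
            uint64_t raw;             /* the whole descriptor as one word */
        };
    };

    int main(void)
    {
        struct tso_desc d = { .l3_offset = 14, .l4_offset = 34,
                .lso_meta_res = 0, .mss = 1448, .lso_hdrlen = 54 };
        uint32_t pkt_len = 9000;

        d.lso_totsegs = (pkt_len + d.mss) / d.mss; /* 7 for this payload */
        printf("raw=0x%016" PRIx64 "\n", d.raw);
        return 0;
    }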
+uint16_t
+nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+    uint32_t buf_idx;
+    uint64_t dma_addr;
+    uint16_t free_descs;
+    uint32_t npkts = 0;
+    uint64_t metadata = 0;
+    uint16_t issued_descs = 0;
+    struct nfp_net_txq *txq;
+    struct nfp_net_hw *hw;
+    struct nfp_net_nfdk_tx_desc *ktxds;
+    struct rte_mbuf *pkt, *temp_pkt;
+    struct rte_mbuf **lmbuf;
+
+    txq = tx_queue;
+    hw = txq->hw;
+
+    PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
+            txq->qidx, txq->wr_p, nb_pkts);
+
+    if ((nfp_net_nfdk_free_tx_desc(txq) < NFDK_TX_DESC_PER_SIMPLE_PKT *
+            nb_pkts) || (nfp_net_nfdk_txq_full(txq)))
+        nfp_net_tx_free_bufs(txq);
+
+    free_descs = (uint16_t)nfp_net_nfdk_free_tx_desc(txq);
+    if (unlikely(free_descs == 0))
+        return 0;
+
+    PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets", txq->qidx, nb_pkts);
+
+    /* Sending packets */
+    while ((npkts < nb_pkts) && free_descs) {
+        uint32_t type, dma_len, dlen_type, tmp_dlen;
+        int nop_descs, used_descs;
+
+        pkt = *(tx_pkts + npkts);
+        nop_descs = nfp_net_nfdk_tx_maybe_close_block(txq, pkt);
+        if (nop_descs < 0)
+            goto xmit_end;
+
+        issued_descs += nop_descs;
+        ktxds = &txq->ktxds[txq->wr_p];
+        /* Grabbing the mbuf linked to the current descriptor */
+        buf_idx = txq->wr_p;
+        lmbuf = &txq->txbufs[buf_idx++].mbuf;
+        /* Warming the cache for releasing the mbuf later on */
+        RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
+
+        temp_pkt = pkt;
+
+        if (unlikely(pkt->nb_segs > 1 &&
+                !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
+            PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
+            PMD_INIT_LOG(INFO, "Multisegment packet unsupported");
+            goto xmit_end;
+        }
+
+        /*
+         * Checksum and VLAN flags just in the first descriptor for a
+         * multisegment packet, but TSO info needs to be in all of them.
+         */
+        dma_len = pkt->data_len;
+        if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) &&
+                (pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+            type = NFDK_DESC_TX_TYPE_TSO;
+        } else if (!pkt->next && dma_len < NFDK_TX_MAX_DATA_PER_HEAD) {
+            type = NFDK_DESC_TX_TYPE_SIMPLE;
+        } else {
+            type = NFDK_DESC_TX_TYPE_GATHER;
+        }
+
+        dma_len -= 1;
+        dlen_type = (NFDK_DESC_TX_DMA_LEN_HEAD & dma_len) |
+                (NFDK_DESC_TX_TYPE_HEAD & (type << 12));
+        ktxds->dma_len_type = rte_cpu_to_le_16(dlen_type);
+        dma_addr = rte_mbuf_data_iova(pkt);
+        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
+                "%" PRIx64 "", dma_addr);
+        ktxds->dma_addr_hi = rte_cpu_to_le_16(dma_addr >> 32);
+        ktxds->dma_addr_lo = rte_cpu_to_le_32(dma_addr & 0xffffffff);
+        ktxds++;
+
+        tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+        dma_len -= tmp_dlen;
+        dma_addr += tmp_dlen + 1;
+
+        while (pkt) {
+            if (*lmbuf)
+                rte_pktmbuf_free_seg(*lmbuf);
+            *lmbuf = pkt;
+            while (dma_len > 0) {
+                dma_len -= 1;
+                dlen_type = NFDK_DESC_TX_DMA_LEN & dma_len;
+
+                ktxds->dma_len_type = rte_cpu_to_le_16(dlen_type);
+                ktxds->dma_addr_hi = rte_cpu_to_le_16(dma_addr >> 32);
+                ktxds->dma_addr_lo = rte_cpu_to_le_32(dma_addr & 0xffffffff);
+                ktxds++;
+
+                dma_len -= dlen_type;
+                dma_addr += dlen_type + 1;
+            }
+
+            if (!pkt->next)
+                break;
+
+            pkt = pkt->next;
+            dma_len = pkt->data_len;
+            dma_addr = rte_mbuf_data_iova(pkt);
+            PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
+                    "%" PRIx64 "", dma_addr);
+
+            lmbuf = &txq->txbufs[buf_idx++].mbuf;
+        }
+
+        (ktxds - 1)->dma_len_type = rte_cpu_to_le_16(dlen_type | NFDK_DESC_TX_EOP);
+
+        ktxds->raw = rte_cpu_to_le_64(nfp_net_nfdk_tx_cksum(txq, temp_pkt, metadata));
+        ktxds++;
+
+        if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) &&
+                (temp_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+            ktxds->raw = rte_cpu_to_le_64(nfp_net_nfdk_tx_tso(txq, temp_pkt));
+            ktxds++;
+        }
+
+        used_descs = ktxds - txq->ktxds - txq->wr_p;
+        if (round_down(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+                round_down(txq->wr_p + used_descs - 1, NFDK_TX_DESC_BLOCK_CNT)) {
+            PMD_INIT_LOG(INFO, "Used descs cross block boundary");
+            goto xmit_end;
+        }
+
+        txq->wr_p = D_IDX(txq, txq->wr_p + used_descs);
+        if (txq->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+            txq->data_pending += temp_pkt->pkt_len;
+        else
+            txq->data_pending = 0;
+
+        issued_descs += used_descs;
+        npkts++;
+        free_descs = (uint16_t)nfp_net_nfdk_free_tx_desc(txq);
+    }
+
+xmit_end:
+    /* Increment write pointers. Force memory write before we let HW know */
+    rte_wmb();
+    nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
+
+    return npkts;
+}
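The core of the loop is the dma_len_type encoding: hardware counts lengths
minus one, the head descriptor keeps its length in the low bits with the
descriptor type above them, and whatever the head cannot cover spills into
gather descriptors sized by the wider NFDK_DESC_TX_DMA_LEN mask. The final
data descriptor is rewritten with NFDK_DESC_TX_EOP, then a checksum-metadata
descriptor and, for TSO, the LSO descriptor follow before the write pointer
moves. A worked example of the head encoding; the masks and the type code
are assumptions standing in for the driver's constants:

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_LEN_HEAD 0x0fffu  /* assumed: low 12 bits hold length - 1 */
    #define TYPE_HEAD    0xf000u  /* assumed: top 4 bits hold the type */
    #define TYPE_SIMPLE  0x8u     /* hypothetical type code for the sketch */

    int main(void)
    {
        uint32_t dma_len = 1500 - 1; /* hardware expects length minus one */
        uint16_t dlen_type = (DMA_LEN_HEAD & dma_len) |
                (TYPE_HEAD & (TYPE_SIMPLE << 12));

        /* 0x85db: type 8 in the top nibble, 1499 in the low 12 bits */
        printf("dlen_type=0x%04x\n", (unsigned int)dlen_type);
        return 0;
    }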

File: drivers/net/nfp/nfp_rxtx.h

@@ -352,6 +352,9 @@ int nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
         uint16_t nb_desc,
         unsigned int socket_id,
         const struct rte_eth_txconf *tx_conf);
+uint16_t nfp_net_nfdk_xmit_pkts(void *tx_queue,
+        struct rte_mbuf **tx_pkts,
+        uint16_t nb_pkts);

 #endif /* _NFP_RXTX_H_ */