net/cnxk: support pending Tx mbuf cleanup

Once mbufs are transmitted, they are freed by the hardware, so no mbufs
accumulate as pending Tx mbufs.
Hence the operation is a NOP for the cnxk platform.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
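
For context, an application reaches this hook through the generic
rte_eth_tx_done_cleanup() ethdev API. Below is a minimal, illustrative sketch
of such a call (the helper name, port/queue IDs and free count are made up for
the example); on cnxk the call simply returns 0 because the hardware has
already freed the transmitted mbufs.

#include <rte_ethdev.h>

/* Illustrative sketch: ask the PMD to free up to 32 already-transmitted
 * mbufs on the given Tx queue. On cnxk this dispatches to
 * cnxk_nix_tx_done_cleanup(), which is a NOP and returns 0 because the
 * hardware frees mbufs as part of transmission.
 */
static int
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_tx_done_cleanup(port_id, queue_id, 32);

	if (ret < 0)
		return ret; /* e.g. -ENOTSUP if the PMD does not implement the op */

	return ret; /* number of mbufs freed; 0 on cnxk */
}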
Author:    Sunil Kumar Kori
Date:      2021-06-23 10:16:47 +05:30
Committer: Jerin Jacob
Parent:    e191360cee
Commit:    4be0b2b140
3 changed files with 12 additions and 0 deletions


@@ -1211,6 +1211,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
 	.rxq_info_get = cnxk_nix_rxq_info_get,
 	.txq_info_get = cnxk_nix_txq_info_get,
+	.tx_done_cleanup = cnxk_nix_tx_done_cleanup,
 };
 
 static int


@@ -266,6 +266,7 @@ int cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
 int cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 				   uint16_t rx_queue_id);
 int cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
+int cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
 int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,


@@ -658,3 +658,13 @@ cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
 	memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
 }
+
+/* It is a NOP for cnxk as HW frees the buffer on xmit */
+int
+cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+	RTE_SET_USED(txq);
+	RTE_SET_USED(free_cnt);
+
+	return 0;
+}