net/hns3: implement Tx mbuf free on demand

This patch adds support for the tx_done_cleanup ops, which enables the
rte_eth_tx_done_cleanup API to free consumed mbufs on the Tx ring.
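
For reference, an application reclaims mbufs through the generic ethdev
call. A minimal usage sketch follows; the port, queue, and budget values
and the helper name are illustrative, and the PMD may return -ENOTSUP
(as this patch does for its non-default Tx paths):

#include <rte_ethdev.h>

/* Try to free up to "budget" already-transmitted mbufs on one Tx queue.
 * Port, queue, and budget are caller-chosen example values. */
static int
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id, uint32_t budget)
{
	int nb_freed = rte_eth_tx_done_cleanup(port_id, queue_id, budget);

	if (nb_freed == -ENOTSUP) {
		/* The PMD (or its currently selected Tx burst function)
		 * cannot clean this queue; wait for normal completion. */
		return 0;
	}
	return nb_freed; /* >= 0: number of freed mbufs, < 0: -errno */
}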

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Author:    Chengwen Feng <fengchengwen@huawei.com>
Date:      2021-03-04 15:44:43 +08:00
Committer: Ferruh Yigit
Parent:    ef1fbd3554
Commit:    dfecc3201f
7 changed files with 65 additions and 0 deletions

doc/guides/nics/features/hns3.ini

@@ -10,6 +10,7 @@ Queue start/stop = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
Burst mode info = Y
Free Tx mbuf on demand = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y

doc/guides/nics/features/hns3_vf.ini

@@ -10,6 +10,7 @@ Queue start/stop = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
Burst mode info = Y
Free Tx mbuf on demand = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y

doc/guides/rel_notes/release_21_05.rst

@@ -58,6 +58,7 @@ New Features

* **Updated Hisilicon hns3 driver.**

  * Added support for module EEPROM dumping.
  * Added support for freeing Tx mbuf on demand.

* **Updated Wangxun txgbe driver.**

drivers/net/hns3/hns3_ethdev.c

@@ -6388,6 +6388,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
	.fec_get = hns3_fec_get,
	.fec_set = hns3_fec_set,
	.tm_ops_get = hns3_tm_ops_get,
	.tx_done_cleanup = hns3_tx_done_cleanup,
};

static const struct hns3_reset_ops hns3_reset_ops = {

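Registering .tx_done_cleanup here is what makes the generic API reach the
driver. In rough sketch form (not the verbatim ethdev library code, which
also validates the port and queue and translates errors), the dispatch
looks like:

int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (dev->dev_ops->tx_done_cleanup == NULL)
		return -ENOTSUP; /* PMD did not register the op */

	/* The op receives the queue's private context, i.e. the same
	 * void *txq that hns3_tx_done_cleanup() takes below. */
	return (*dev->dev_ops->tx_done_cleanup)(
			dev->data->tx_queues[queue_id], free_cnt);
}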
drivers/net/hns3/hns3_ethdev_vf.c

@@ -2763,6 +2763,7 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.vlan_offload_set = hns3vf_vlan_offload_set,
	.get_reg = hns3_get_regs,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	.tx_done_cleanup = hns3_tx_done_cleanup,
};

static const struct hns3_reset_ops hns3vf_reset_ops = {

drivers/net/hns3/hns3_rxtx.c

@@ -3913,6 +3913,65 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	return 0;
}

static int
hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
{
	uint16_t next_to_clean = txq->next_to_clean;
	uint16_t next_to_use = txq->next_to_use;
	uint16_t tx_bd_ready = txq->tx_bd_ready;
	struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
	struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
	uint32_t idx;

	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
		free_cnt = txq->nb_tx_desc;

	for (idx = 0; idx < free_cnt; idx++) {
		if (next_to_clean == next_to_use)
			break;
		/* Stop at the first descriptor hardware still owns. */
		if (desc->tx.tp_fe_sc_vld_ra_ri &
		    rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
			break;

		if (tx_pkt->mbuf != NULL) {
			rte_pktmbuf_free_seg(tx_pkt->mbuf);
			tx_pkt->mbuf = NULL;
		}

		next_to_clean++;
		tx_bd_ready++;
		tx_pkt++;
		desc++;
		/* Wrap back to the start of the ring. */
		if (next_to_clean == txq->nb_tx_desc) {
			tx_pkt = txq->sw_ring;
			desc = txq->tx_ring;
			next_to_clean = 0;
		}
	}

	if (idx > 0) {
		txq->next_to_clean = next_to_clean;
		txq->tx_bd_ready = tx_bd_ready;
	}

	return (int)idx;
}

int
hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];

	if (dev->tx_pkt_burst == hns3_xmit_pkts)
		return hns3_tx_done_cleanup_full(q, free_cnt);
	else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
		/* Dummy burst function in place: nothing to clean. */
		return 0;
	else
		/* Other Tx burst functions do not support cleanup. */
		return -ENOTSUP;
}

uint32_t
hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{

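To see how hns3_tx_done_cleanup_full() above walks the ring, here is a
minimal, self-contained sketch of the same pattern: scan from
next_to_clean, stop at next_to_use or at the first descriptor hardware
still owns, wrap at the ring size. All names in this sketch are
hypothetical; only the index/ownership logic mirrors the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct demo_ring {
	bool hw_owned[RING_SIZE]; /* stands in for the VLD bit in the BD */
	uint16_t next_to_clean;
	uint16_t next_to_use;
};

static uint32_t
demo_cleanup(struct demo_ring *r, uint32_t free_cnt)
{
	uint16_t ntc = r->next_to_clean;
	uint32_t idx;

	if (free_cnt == 0 || free_cnt > RING_SIZE)
		free_cnt = RING_SIZE;

	for (idx = 0; idx < free_cnt; idx++) {
		if (ntc == r->next_to_use) /* ring fully drained */
			break;
		if (r->hw_owned[ntc])      /* hardware not done yet */
			break;
		/* a real driver would rte_pktmbuf_free_seg() here */
		if (++ntc == RING_SIZE)    /* wrap at end of ring */
			ntc = 0;
	}

	if (idx > 0)
		r->next_to_clean = ntc;
	return idx;
}

int main(void)
{
	struct demo_ring r = { .next_to_clean = 6, .next_to_use = 3 };

	/* descriptors 6, 7, 0, 1 are done; 2 is still owned by hardware */
	r.hw_owned[2] = true;
	printf("cleaned %u descriptors\n", demo_cleanup(&r, 0)); /* -> 4 */
	return 0;
}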
drivers/net/hns3/hns3_rxtx.h

@@ -706,5 +706,6 @@ int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
#endif /* _HNS3_RXTX_H_ */