net/iavf: improve performance of Rx timestamp offload

In this patch, we use CPU ticks instead of the HW register
to determine whether the low 32-bit timestamp has rolled
over. This avoids reading the register value frequently and
improves receive performance.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Author: Wenjun Wu
Date: 2022-04-28 16:13:46 +08:00
Committer: Qi Zhang
Parent: b5cd735132
Commit: 33db16136e
4 changed files with 43 additions and 19 deletions
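
The scheme can be summarized with a small standalone sketch: keep a millisecond software clock derived from CPU ticks, and go back to the device clock only when the cached PHC time is older than a short refresh window. This is an illustrative sketch, not driver code; struct ts_cache, sw_time_ms(), maybe_refresh_phc() and read_phc_time_from_hw() are made-up names (the last one standing in for the driver's iavf_get_phc_time()), while rte_get_timer_cycles() and rte_get_timer_hz() are the real DPDK APIs used by the patch.

    #include <stdint.h>
    #include <rte_cycles.h>

    /* Stand-in for the driver's PHC read (iavf_get_phc_time() in this
     * patch); assumed to return the full 64-bit device time in ns via a
     * comparatively slow device access.
     */
    extern uint64_t read_phc_time_from_hw(void);

    struct ts_cache {
        uint64_t phc_time;       /* last full 64-bit PHC time (ns) */
        uint64_t hw_time_update; /* software clock (ms) at last refresh */
    };

    /* Millisecond clock built from CPU ticks; no device access needed. */
    static inline uint64_t
    sw_time_ms(void)
    {
        return rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
    }

    /* Re-read the device clock only when the cached value may be stale;
     * the patch uses a 4 ms window. Rx timestamps received inside the
     * window are extended from the cached phc_time without touching HW.
     */
    static inline void
    maybe_refresh_phc(struct ts_cache *c)
    {
        uint64_t now_ms = sw_time_ms();

        if (now_ms - c->hw_time_update > 4) {
            c->phc_time = read_phc_time_from_hw();
            c->hw_time_update = now_ms;
        }
    }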

drivers/net/iavf/iavf.h

@@ -315,6 +315,7 @@ struct iavf_adapter {
     uint16_t fdir_ref_cnt;
     struct iavf_devargs devargs;
     uint64_t phc_time;
+    uint64_t hw_time_update;
 };
 /* IAVF_DEV_PRIVATE_TO */

drivers/net/iavf/iavf_ethdev.c

@@ -1019,6 +1019,15 @@ iavf_dev_start(struct rte_eth_dev *dev)
         goto err_mac;
     }
+    if (dev->data->dev_conf.rxmode.offloads &
+        RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+        if (iavf_get_phc_time(adapter)) {
+            PMD_DRV_LOG(ERR, "get physical time failed");
+            goto err_mac;
+        }
+        adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+    }
     return 0;
 err_mac:

drivers/net/iavf/iavf_rxtx.c

@@ -1433,8 +1433,14 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
     struct iavf_adapter *ad = rxq->vsi->adapter;
     uint64_t ts_ns;
-    if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-        rxq->hw_register_set = 1;
+    if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+        uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+        if (sw_cur_time - ad->hw_time_update > 4) {
+            if (iavf_get_phc_time(ad))
+                PMD_DRV_LOG(ERR, "get physical time failed");
+            ad->hw_time_update = sw_cur_time;
+        }
+    }
     while (nb_rx < nb_pkts) {
         rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1499,13 +1505,12 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
         pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
         if (iavf_timestamp_dynflag > 0) {
-            if (rxq->hw_register_set)
-                iavf_get_phc_time(ad);
-            rxq->hw_register_set = 0;
             ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
                 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+            ad->phc_time = ts_ns;
+            ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
             *RTE_MBUF_DYNFIELD(rxm,
                 iavf_timestamp_dynfield_offset,
                 rte_mbuf_timestamp_t *) = ts_ns;
@@ -1547,8 +1552,14 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
     volatile union iavf_rx_flex_desc *rxdp;
     const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-    if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-        rxq->hw_register_set = 1;
+    if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+        uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+        if (sw_cur_time - ad->hw_time_update > 4) {
+            if (iavf_get_phc_time(ad))
+                PMD_DRV_LOG(ERR, "get physical time failed");
+            ad->hw_time_update = sw_cur_time;
+        }
+    }
     while (nb_rx < nb_pkts) {
         rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1663,13 +1674,12 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
         pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
         if (iavf_timestamp_dynflag > 0) {
-            if (rxq->hw_register_set)
-                iavf_get_phc_time(ad);
-            rxq->hw_register_set = 0;
             ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
                 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+            ad->phc_time = ts_ns;
+            ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
             *RTE_MBUF_DYNFIELD(first_seg,
                 iavf_timestamp_dynfield_offset,
                 rte_mbuf_timestamp_t *) = ts_ns;
@@ -1883,8 +1893,14 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
     if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
         return 0;
-    if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-        rxq->hw_register_set = 1;
+    if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+        uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+        if (sw_cur_time - ad->hw_time_update > 4) {
+            if (iavf_get_phc_time(ad))
+                PMD_DRV_LOG(ERR, "get physical time failed");
+            ad->hw_time_update = sw_cur_time;
+        }
+    }
     /* Scan LOOK_AHEAD descriptors at a time to determine which
      * descriptors reference packets that are ready to be received.
@@ -1943,13 +1959,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
         pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
         if (iavf_timestamp_dynflag > 0) {
-            if (rxq->hw_register_set)
-                iavf_get_phc_time(ad);
-            rxq->hw_register_set = 0;
             ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
                 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+            ad->phc_time = ts_ns;
+            ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
             *RTE_MBUF_DYNFIELD(mb,
                 iavf_timestamp_dynfield_offset,
                 rte_mbuf_timestamp_t *) = ts_ns;

drivers/net/iavf/iavf_rxtx.h

@@ -222,7 +222,6 @@ struct iavf_rx_queue {
     /* flexible descriptor metadata extraction offload flag */
     struct iavf_rx_queue_stats stats;
     uint64_t offloads;
-    uint32_t hw_register_set;
 };
 struct iavf_tx_entry {
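
For context on how the cached time extends the 32-bit descriptor timestamp, a simplified version of the rollover handling that iavf_tstamp_convert_32b_64b() relies on is sketched below. It assumes the hardware timestamp is never behind the cached time and that the cache is at most one 32-bit wrap period old (which the refresh window above is meant to ensure); the driver's actual helper may handle more cases, so treat this as an illustration rather than the in-tree code.

    #include <stdint.h>

    /* Extend a 32-bit hardware timestamp to 64 bits against a cached
     * 64-bit reference. If the new low word is smaller than the cached
     * low word, the 32-bit counter has wrapped since the reference was
     * taken, so carry one into the upper 32 bits.
     */
    static inline uint64_t
    extend_32b_timestamp(uint64_t cached_time, uint32_t ts_low)
    {
        uint64_t high = cached_time & 0xFFFFFFFF00000000ULL;

        if (ts_low < (uint32_t)cached_time)
            high += (1ULL << 32);

        return high | ts_low;
    }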