net/iavf: fix race condition with Rx timestamp offload

In multi-core cases with Rx timestamp offload enabled, if packets
arrive too fast, the admin queue (aq) command used to fetch the PHC
time can be issued concurrently from several cores and left pending.

This patch adds a spinlock to serialize that aq command. To avoid the
PHC time being frequently overwritten, the related variables are moved
into the iavf_rx_queue structure, so each queue handles its own
timestamp calculation.

Fixes: b5cd735132 ("net/iavf: enable Rx timestamp on flex descriptor")
Fixes: 33db16136e ("net/iavf: improve performance of Rx timestamp offload")

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
commit 149280731b (parent bc0e85586e)
Author:    Wenjun Wu <wenjun1.wu@intel.com>
Date:      2022-05-23 12:49:00 +08:00
Committer: Qi Zhang <qi.z.zhang@intel.com>

 5 files changed, 47 insertions(+), 41 deletions(-)
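
For context, the race is on the admin queue path that refreshes the PHC
time: before this patch every Rx queue read and wrote the shared
adapter->phc_time, and several lcores could issue
VIRTCHNL_OP_1588_PTP_GET_TIME at the same time. Condensed from the
iavf_vchnl.c hunk below (error handling trimmed), the pattern the fix
introduces is:

    /* One adapter-wide lock serializes the aq command; the result is
     * cached per Rx queue, so a refresh on one queue no longer
     * overwrites the running phc_time of another.
     */
    rte_spinlock_lock(&vf->phc_time_aq_lock);
    err = iavf_execute_vf_cmd(adapter, &args, 0);
    if (!err)
        rxq->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
    rte_spinlock_unlock(&vf->phc_time_aq_lock);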

--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h

@@ -270,6 +270,7 @@ struct iavf_info {
 	struct rte_eth_dev *eth_dev;
 
 	uint32_t ptp_caps;
+	rte_spinlock_t phc_time_aq_lock;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -315,8 +316,6 @@ struct iavf_adapter {
 	bool closed;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
-	uint64_t phc_time;
-	uint64_t hw_time_update;
 };
 
 /* IAVF_DEV_PRIVATE_TO */
@@ -482,5 +481,5 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 			uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
-int iavf_get_phc_time(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_rx_queue *rxq);
 #endif /* _IAVF_ETHDEV_H_ */

--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c

@@ -1039,20 +1039,13 @@ iavf_dev_start(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  true);
 
+	rte_spinlock_init(&vf->phc_time_aq_lock);
+
 	if (iavf_start_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "enable queues failed");
 		goto err_mac;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads &
-	    RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
-		if (iavf_get_phc_time(adapter)) {
-			PMD_DRV_LOG(ERR, "get physical time failed");
-			goto err_mac;
-		}
-		adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
-	}
-
 	return 0;
 
 err_mac:
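
The expression rte_get_timer_cycles() / (rte_get_timer_hz() / 1000),
which recurs throughout this patch, is a millisecond-resolution software
clock. A hypothetical helper (iavf_ms_now() is illustrative only, not
part of the patch) makes the intent explicit:

    /* Illustrative helper, not in the patch: current software time in
     * milliseconds. rte_get_timer_hz() returns timer ticks per second,
     * so hz / 1000 is ticks per millisecond.
     */
    static inline uint64_t
    iavf_ms_now(void)
    {
        return rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
    }

With it, the staleness check in the Rx paths below reads
iavf_ms_now() - rxq->hw_time_update > 4, i.e. the cached PHC time is
re-fetched once it is more than 4 ms old.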

--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c

@@ -904,6 +904,15 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
+	if (dev->data->dev_conf.rxmode.offloads &
+	    RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		if (iavf_get_phc_time(rxq)) {
+			PMD_DRV_LOG(ERR, "get physical time failed");
+			return err;
+		}
+		rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+	}
+
 	return err;
 }
 
@@ -1440,6 +1449,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl;
+	uint64_t ts_ns;
 
 	nb_rx = 0;
 	nb_hold = 0;
@@ -1448,15 +1458,13 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	rx_ring = rxq->rx_ring;
 	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-	struct iavf_adapter *ad = rxq->vsi->adapter;
-	uint64_t ts_ns;
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
 
-		if (sw_cur_time - ad->hw_time_update > 4) {
-			if (iavf_get_phc_time(ad))
+		if (sw_cur_time - rxq->hw_time_update > 4) {
+			if (iavf_get_phc_time(rxq))
 				PMD_DRV_LOG(ERR, "get physical time failed");
-			ad->hw_time_update = sw_cur_time;
+			rxq->hw_time_update = sw_cur_time;
 		}
 	}
 
@@ -1523,11 +1531,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+			ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
-			ad->phc_time = ts_ns;
-			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+			rxq->phc_time = ts_ns;
+			rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
 
 			*RTE_MBUF_DYNFIELD(rxm,
 				iavf_timestamp_dynfield_offset,
@@ -1563,7 +1571,6 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t rx_stat_err0;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
-	struct iavf_adapter *ad = rxq->vsi->adapter;
 	uint64_t ts_ns;
 
 	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
@@ -1572,10 +1579,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
-		if (sw_cur_time - ad->hw_time_update > 4) {
-			if (iavf_get_phc_time(ad))
+
+		if (sw_cur_time - rxq->hw_time_update > 4) {
+			if (iavf_get_phc_time(rxq))
 				PMD_DRV_LOG(ERR, "get physical time failed");
-			ad->hw_time_update = sw_cur_time;
+			rxq->hw_time_update = sw_cur_time;
 		}
 	}
 
@@ -1692,11 +1700,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+			ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
-			ad->phc_time = ts_ns;
-			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+			rxq->phc_time = ts_ns;
+			rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
 
 			*RTE_MBUF_DYNFIELD(first_seg,
 				iavf_timestamp_dynfield_offset,
@@ -1899,7 +1907,6 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	int32_t nb_staged = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-	struct iavf_adapter *ad = rxq->vsi->adapter;
 	uint64_t ts_ns;
 
 	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
@@ -1913,10 +1920,11 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
-		if (sw_cur_time - ad->hw_time_update > 4) {
-			if (iavf_get_phc_time(ad))
+
+		if (sw_cur_time - rxq->hw_time_update > 4) {
+			if (iavf_get_phc_time(rxq))
 				PMD_DRV_LOG(ERR, "get physical time failed");
-			ad->hw_time_update = sw_cur_time;
+			rxq->hw_time_update = sw_cur_time;
 		}
 	}
 
@@ -1977,11 +1985,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+			ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
 				rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
 
-			ad->phc_time = ts_ns;
-			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+			rxq->phc_time = ts_ns;
+			rxq->hw_time_update = rte_get_timer_cycles() /
+					(rte_get_timer_hz() / 1000);
 
 			*RTE_MBUF_DYNFIELD(mb,
 				iavf_timestamp_dynfield_offset,
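
The conversion helper called above is outside this diff:
iavf_tstamp_convert_32b_64b() extends the 32-bit flex_ts.ts_high field
to 64 bits around the queue's cached PHC time with the usual
nearest-value wrap heuristic, roughly as sketched here:

    /* Sketch of the 32b -> 64b extension: take the delta between the
     * hardware timestamp and the low 32 bits of the cached PHC time in
     * whichever direction is shorter. This is unambiguous as long as
     * the cache is refreshed well within one 32-bit nanosecond
     * wrap-around (~4.29 s).
     */
    static inline uint64_t
    iavf_tstamp_convert_32b_64b(uint64_t time, uint32_t timestamp)
    {
        const uint64_t mask = 0xFFFFFFFF;
        uint32_t delta;
        uint64_t ns;

        delta = timestamp - (uint32_t)(time & mask);
        if (delta > mask / 2) {
            delta = (uint32_t)(time & mask) - timestamp;
            ns = time - delta;
        } else {
            ns = time + delta;
        }
        return ns;
    }

This is also why a per-queue phc_time works: the helper only needs a
64-bit reference that is recent relative to the wrap period, not one
adapter-wide value.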

--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h

@@ -223,6 +223,8 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
+	uint64_t phc_time;
+	uint64_t hw_time_update;
 };
 
 struct iavf_tx_entry {

--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c

@@ -1913,12 +1913,13 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter)
 }
 
 int
-iavf_get_phc_time(struct iavf_adapter *adapter)
+iavf_get_phc_time(struct iavf_rx_queue *rxq)
 {
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct virtchnl_phc_time phc_time;
 	struct iavf_cmd_info args;
-	int err;
+	int err = 0;
 
 	args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
 	args.in_args = (uint8_t *)&phc_time;
@@ -1926,14 +1927,16 @@ iavf_get_phc_time(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
+	rte_spinlock_lock(&vf->phc_time_aq_lock);
 	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			"Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
-		return err;
+		goto out;
 	}
 
-	adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
-
-	return 0;
+	rxq->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
+out:
+	rte_spinlock_unlock(&vf->phc_time_aq_lock);
+	return err;
 }