net/ice: support IEEE 1588 PTP
Add ice support for new ethdev APIs to enable/disable and read/write/adjust IEEE 1588 PTP timestamps. Currently, only the scalar path supports 1588 PTP; the vector path doesn't. The example command for running ptpclient is as below: ./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1 Signed-off-by: Simei Su <simei.su@intel.com> Acked-by: Qi Zhang <qi.z.zhang@intel.com>
This commit is contained in:
parent
b71573ec2f
commit
646dcbe6c7
@ -32,6 +32,7 @@ Timestamp offload = P
|
||||
Inner L3 checksum = P
|
||||
Inner L4 checksum = P
|
||||
Packet type parsing = Y
|
||||
Timesync = Y
|
||||
Rx descriptor status = Y
|
||||
Tx descriptor status = Y
|
||||
Basic stats = Y
|
||||
|
@ -83,6 +83,7 @@ New Features
|
||||
* Added 1PPS out support by a devargs.
|
||||
* Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
|
||||
* Added DEV_RX_OFFLOAD_TIMESTAMP support.
|
||||
* Added timesync API support under scalar path.
|
||||
|
||||
* **Updated Marvell cnxk ethdev driver.**
|
||||
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include "base/ice_flow.h"
|
||||
#include "base/ice_dcb.h"
|
||||
#include "base/ice_common.h"
|
||||
#include "base/ice_ptp_hw.h"
|
||||
|
||||
#include "rte_pmd_ice.h"
|
||||
#include "ice_ethdev.h"
|
||||
@ -32,6 +33,8 @@
|
||||
#define ICE_ONE_PPS_OUT_ARG "pps_out"
|
||||
#define ICE_RX_LOW_LATENCY_ARG "rx_low_latency"
|
||||
|
||||
#define ICE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
|
||||
|
||||
uint64_t ice_timestamp_dynflag;
|
||||
int ice_timestamp_dynfield_offset = -1;
|
||||
|
||||
@ -45,7 +48,6 @@ static const char * const ice_valid_args[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
#define NSEC_PER_SEC 1000000000
|
||||
#define PPS_OUT_DELAY_NS 1
|
||||
|
||||
static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
|
||||
@ -151,6 +153,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
|
||||
struct rte_eth_udp_tunnel *udp_tunnel);
|
||||
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
|
||||
struct rte_eth_udp_tunnel *udp_tunnel);
|
||||
static int ice_timesync_enable(struct rte_eth_dev *dev);
|
||||
static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
|
||||
struct timespec *timestamp,
|
||||
uint32_t flags);
|
||||
static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
|
||||
struct timespec *timestamp);
|
||||
static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
|
||||
static int ice_timesync_read_time(struct rte_eth_dev *dev,
|
||||
struct timespec *timestamp);
|
||||
static int ice_timesync_write_time(struct rte_eth_dev *dev,
|
||||
const struct timespec *timestamp);
|
||||
static int ice_timesync_disable(struct rte_eth_dev *dev);
|
||||
|
||||
static const struct rte_pci_id pci_id_ice_map[] = {
|
||||
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
|
||||
@ -234,6 +248,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
|
||||
.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
|
||||
.tx_done_cleanup = ice_tx_done_cleanup,
|
||||
.get_monitor_addr = ice_get_monitor_addr,
|
||||
.timesync_enable = ice_timesync_enable,
|
||||
.timesync_read_rx_timestamp = ice_timesync_read_rx_timestamp,
|
||||
.timesync_read_tx_timestamp = ice_timesync_read_tx_timestamp,
|
||||
.timesync_adjust_time = ice_timesync_adjust_time,
|
||||
.timesync_read_time = ice_timesync_read_time,
|
||||
.timesync_write_time = ice_timesync_write_time,
|
||||
.timesync_disable = ice_timesync_disable,
|
||||
};
|
||||
|
||||
/* store statistics names and its offset in stats structure */
|
||||
@ -5486,6 +5507,184 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
ice_timesync_enable(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
struct ice_adapter *ad =
|
||||
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
|
||||
int ret;
|
||||
|
||||
if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_TIMESTAMP)) {
|
||||
PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (hw->func_caps.ts_func_info.src_tmr_owned) {
|
||||
ret = ice_ptp_init_phc(hw);
|
||||
if (ret) {
|
||||
PMD_DRV_LOG(ERR, "Failed to initialize PHC");
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
|
||||
if (ret) {
|
||||
PMD_DRV_LOG(ERR,
|
||||
"Failed to write PHC increment time value");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Initialize cycle counters for system time/RX/TX timestamp */
|
||||
memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
|
||||
memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
|
||||
memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
|
||||
|
||||
ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
|
||||
ad->systime_tc.cc_shift = 0;
|
||||
ad->systime_tc.nsec_mask = 0;
|
||||
|
||||
ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
|
||||
ad->rx_tstamp_tc.cc_shift = 0;
|
||||
ad->rx_tstamp_tc.nsec_mask = 0;
|
||||
|
||||
ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
|
||||
ad->tx_tstamp_tc.cc_shift = 0;
|
||||
ad->tx_tstamp_tc.nsec_mask = 0;
|
||||
|
||||
ad->ptp_ena = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * Read the latched Rx IEEE 1588 timestamp of a received PTP packet.
 *
 * The Rx burst path latches the 32-bit descriptor timestamp into
 * rxq->time_high when it sees a PTP Ethernet frame; this function extends
 * it to 64 bits and runs it through the Rx timecounter.
 *
 * @param dev        ethdev to read from
 * @param timestamp  output, converted to struct timespec
 * @param flags      used as the Rx queue index (see note below)
 * @return 0 always
 */
static int
ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp, uint32_t flags)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_rx_queue *rxq;
	uint32_t ts_high;
	uint64_t ts_ns, ns;

	/* NOTE(review): 'flags' is used as the Rx queue index. The Rx path
	 * stores rxq->queue_id in mbuf->timesync, which the application is
	 * expected to pass back here as 'flags' — confirm this matches the
	 * caller's contract; an out-of-range value would index past
	 * rx_queues[].
	 */
	rxq = dev->data->rx_queues[flags];

	/* 32-bit timestamp latched by the scalar Rx path for this queue. */
	ts_high = rxq->time_high;
	ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
|
||||
|
||||
/**
 * Read the Tx IEEE 1588 timestamp of the last transmitted PTP packet.
 *
 * Reads PHY timestamp slot 0 on the local port, drops the low 8 bits of
 * the raw value, keeps the next 32 bits, extends them to 64 bits and runs
 * the result through the Tx timecounter.
 *
 * @param dev        ethdev to read from
 * @param timestamp  output, converted to struct timespec
 * @return 0 on success, -1 if the PHY timestamp read fails
 */
static int
ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint8_t lport;
	uint64_t ts_ns, ns, tstamp;
	const uint64_t mask = 0xFFFFFFFF;
	int ret;

	lport = hw->port_info->lport;

	/* Tx timestamp index 0 — the Tx path arms a single timestamp slot. */
	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
		return -1;
	}

	/* Raw PHY value: low 8 bits discarded, next 32 bits are the
	 * timestamp to be extended to 64 bits.
	 */
	ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
|
||||
|
||||
static int
|
||||
ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
|
||||
{
|
||||
struct ice_adapter *ad =
|
||||
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
|
||||
|
||||
ad->systime_tc.nsec += delta;
|
||||
ad->rx_tstamp_tc.nsec += delta;
|
||||
ad->tx_tstamp_tc.nsec += delta;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
|
||||
{
|
||||
struct ice_adapter *ad =
|
||||
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
|
||||
uint64_t ns;
|
||||
|
||||
ns = rte_timespec_to_ns(ts);
|
||||
|
||||
ad->systime_tc.nsec = ns;
|
||||
ad->rx_tstamp_tc.nsec = ns;
|
||||
ad->tx_tstamp_tc.nsec = ns;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * Read the current PHC time from the GLTSYN_TIME registers.
 *
 * The 64-bit hardware time is split across two 32-bit registers, so the
 * low word is sampled twice around the high word to detect a rollover
 * between the two reads; on rollover both halves are re-read.
 *
 * @param dev  ethdev to read from
 * @param ts   output, converted to struct timespec
 * @return 0 always
 */
static int
ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint32_t hi, lo, lo2;
	uint64_t time, ns;

	/* lo / hi / lo second sample: the read order matters for the
	 * rollover check below.
	 */
	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));

	/* Low word wrapped between the two samples — hi may be stale,
	 * so re-read both halves.
	 */
	if (lo2 < lo) {
		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
	}

	time = ((uint64_t)hi << 32) | lo;
	ns = rte_timecounter_update(&ad->systime_tc, time);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}
|
||||
|
||||
/**
 * Disable IEEE 1588 PTP timesync support.
 *
 * Clears the pending PHY Tx timestamp slot, turns off the GLTSYN timer
 * enable bit, zeroes the increment value registers (stopping the clock
 * from advancing), and marks PTP as disabled in the adapter so the Rx/Tx
 * paths stop latching timestamps.
 *
 * @param dev  ethdev to disable timesync on
 * @return 0 always
 */
static int
ice_timesync_disable(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint64_t val;
	uint8_t lport;

	lport = hw->port_info->lport;

	/* Drop any timestamp still pending in PHY slot 0. */
	ice_clear_phy_tstamp(hw, lport, 0);

	/* Read-modify-write: clear only the timer enable bit. */
	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
	val &= ~GLTSYN_ENA_TSYN_ENA_M;
	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);

	/* Zero increment value — the PHC no longer advances. */
	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);

	ad->ptp_ena = 0;

	return 0;
}
|
||||
|
||||
static int
|
||||
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
|
||||
struct rte_pci_device *pci_dev)
|
||||
|
@ -6,6 +6,7 @@
|
||||
#define _ICE_ETHDEV_H_
|
||||
|
||||
#include <rte_kvargs.h>
|
||||
#include <rte_time.h>
|
||||
|
||||
#include <ethdev_driver.h>
|
||||
|
||||
@ -502,6 +503,11 @@ struct ice_adapter {
|
||||
struct ice_devargs devargs;
|
||||
enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
|
||||
uint16_t fdir_ref_cnt;
|
||||
/* For PTP */
|
||||
struct rte_timecounter systime_tc;
|
||||
struct rte_timecounter rx_tstamp_tc;
|
||||
struct rte_timecounter tx_tstamp_tc;
|
||||
bool ptp_ena;
|
||||
#ifdef RTE_ARCH_X86
|
||||
bool rx_use_avx2;
|
||||
bool rx_use_avx512;
|
||||
|
@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
|
||||
struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
|
||||
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
|
||||
uint32_t regval;
|
||||
struct ice_adapter *ad = rxq->vsi->adapter;
|
||||
|
||||
/* Set buffer size as the head split is disabled. */
|
||||
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
|
||||
@ -366,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
|
||||
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
|
||||
QRXFLXP_CNTXT_RXDID_PRIO_M;
|
||||
|
||||
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
|
||||
if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
|
||||
regval |= QRXFLXP_CNTXT_TS_M;
|
||||
|
||||
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
|
||||
@ -704,6 +705,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
|
||||
tx_ctx.tso_ena = 1; /* tso enable */
|
||||
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
|
||||
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
|
||||
tx_ctx.tsyn_ena = 1;
|
||||
|
||||
ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
|
||||
ice_tlan_ctx_info);
|
||||
@ -1564,6 +1566,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
|
||||
struct ice_vsi *vsi = rxq->vsi;
|
||||
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
|
||||
uint64_t ts_ns;
|
||||
struct ice_adapter *ad = rxq->vsi->adapter;
|
||||
|
||||
rxdp = &rxq->rx_ring[rxq->rx_tail];
|
||||
rxep = &rxq->sw_ring[rxq->rx_tail];
|
||||
@ -1618,6 +1621,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
|
||||
}
|
||||
}
|
||||
|
||||
if (ad->ptp_ena && ((mb->packet_type &
|
||||
RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
|
||||
rxq->time_high =
|
||||
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
|
||||
mb->timesync = rxq->queue_id;
|
||||
pkt_flags |= PKT_RX_IEEE1588_PTP;
|
||||
}
|
||||
|
||||
mb->ol_flags |= pkt_flags;
|
||||
}
|
||||
|
||||
@ -1804,6 +1815,7 @@ ice_recv_scattered_pkts(void *rx_queue,
|
||||
struct ice_vsi *vsi = rxq->vsi;
|
||||
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
|
||||
uint64_t ts_ns;
|
||||
struct ice_adapter *ad = rxq->vsi->adapter;
|
||||
|
||||
while (nb_rx < nb_pkts) {
|
||||
rxdp = &rx_ring[rx_id];
|
||||
@ -1926,6 +1938,14 @@ ice_recv_scattered_pkts(void *rx_queue,
|
||||
}
|
||||
}
|
||||
|
||||
if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
|
||||
== RTE_PTYPE_L2_ETHER_TIMESYNC)) {
|
||||
rxq->time_high =
|
||||
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
|
||||
first_seg->timesync = rxq->queue_id;
|
||||
pkt_flags |= PKT_RX_IEEE1588_PTP;
|
||||
}
|
||||
|
||||
first_seg->ol_flags |= pkt_flags;
|
||||
/* Prefetch data of first segment, if configured to do so. */
|
||||
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
|
||||
@ -2284,6 +2304,7 @@ ice_recv_pkts(void *rx_queue,
|
||||
struct ice_vsi *vsi = rxq->vsi;
|
||||
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
|
||||
uint64_t ts_ns;
|
||||
struct ice_adapter *ad = rxq->vsi->adapter;
|
||||
|
||||
while (nb_rx < nb_pkts) {
|
||||
rxdp = &rx_ring[rx_id];
|
||||
@ -2347,6 +2368,14 @@ ice_recv_pkts(void *rx_queue,
|
||||
}
|
||||
}
|
||||
|
||||
if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
|
||||
RTE_PTYPE_L2_ETHER_TIMESYNC)) {
|
||||
rxq->time_high =
|
||||
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
|
||||
rxm->timesync = rxq->queue_id;
|
||||
pkt_flags |= PKT_RX_IEEE1588_PTP;
|
||||
}
|
||||
|
||||
rxm->ol_flags |= pkt_flags;
|
||||
/* copy old mbuf to rx_pkts */
|
||||
rx_pkts[nb_rx++] = rxm;
|
||||
@ -2558,7 +2587,8 @@ ice_calc_context_desc(uint64_t flags)
|
||||
static uint64_t mask = PKT_TX_TCP_SEG |
|
||||
PKT_TX_QINQ |
|
||||
PKT_TX_OUTER_IP_CKSUM |
|
||||
PKT_TX_TUNNEL_MASK;
|
||||
PKT_TX_TUNNEL_MASK |
|
||||
PKT_TX_IEEE1588_TMST;
|
||||
|
||||
return (flags & mask) ? 1 : 0;
|
||||
}
|
||||
@ -2726,6 +2756,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
||||
if (ol_flags & PKT_TX_TCP_SEG)
|
||||
cd_type_cmd_tso_mss |=
|
||||
ice_set_tso_ctx(tx_pkt, tx_offload);
|
||||
else if (ol_flags & PKT_TX_IEEE1588_TMST)
|
||||
cd_type_cmd_tso_mss |=
|
||||
((uint64_t)ICE_TX_CTX_DESC_TSYN <<
|
||||
ICE_TXD_CTX_QW1_CMD_S);
|
||||
|
||||
ctx_txd->tunneling_params =
|
||||
rte_cpu_to_le_32(cd_tunneling_params);
|
||||
@ -3127,6 +3161,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
|
||||
ad->rx_use_avx512 = false;
|
||||
ad->rx_use_avx2 = false;
|
||||
rx_check_ret = ice_rx_vec_dev_check(dev);
|
||||
if (ad->ptp_ena)
|
||||
rx_check_ret = -1;
|
||||
if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
|
||||
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
|
||||
ad->rx_vec_allowed = true;
|
||||
|
@ -92,6 +92,7 @@ struct ice_rx_queue {
|
||||
ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
|
||||
ice_rx_release_mbufs_t rx_rel_mbufs;
|
||||
uint64_t offloads;
|
||||
uint32_t time_high;
|
||||
};
|
||||
|
||||
struct ice_tx_entry {
|
||||
|
Loading…
x
Reference in New Issue
Block a user