net/mlx5: convert Rx timestamps in real-time format

The ConnectX-6DX supports timestamps in various formats. The new
real-time format is introduced: the upper 32-bit word of the timestamp
contains the UTC seconds and the lower 32-bit word contains the
nanoseconds. This patch detects which format is configured in the NIC
and converts the Rx timestamps accordingly.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Author:    Viacheslav Ovsiienko, 2020-07-16 08:23:20 +0000
Committer: Ferruh Yigit
Commit:    a2854c4de1 (parent bb7ef9a962)
Diffstat:  9 files changed, 149 insertions(+), 35 deletions(-)
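As an illustration of the real-time format described above, here is a minimal sketch (a hypothetical helper for illustration, not the PMD's mlx5_txpp_convert_rx_ts() implementation) that splits such a 64-bit timestamp into its UTC-seconds and nanoseconds halves and folds them into a single nanosecond count:

#include <stdint.h>

/*
 * Hypothetical helper, illustration only: decompose a real-time CQE
 * timestamp (UTC seconds in the upper 32 bits, nanoseconds in the lower
 * 32 bits) into a plain nanosecond count.
 */
static inline uint64_t
rt_timestamp_to_ns(uint64_t ts)
{
	uint64_t sec = ts >> 32;           /* UTC seconds */
	uint64_t nsec = ts & 0xffffffffu;  /* nanoseconds within the second */

	return sec * 1000000000ULL + nsec;
}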


@@ -936,6 +936,27 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
goto error;
#endif
}
if (config.devx) {
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
err = mlx5_devx_cmd_register_read
(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc));
if (!err) {
uint32_t ts_mode;
/* MTUTC register is read successfully. */
ts_mode = MLX5_GET(register_mtutc, reg,
time_stamp_mode);
if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
config.rt_timestamp = 1;
} else {
/* Kernel does not support register reading. */
if (config.hca_attr.dev_freq_khz ==
(NS_PER_S / MS_PER_S))
config.rt_timestamp = 1;
}
}
if (config.mprq.enabled && mprq) {
if (config.mprq.stride_num_n &&
(config.mprq.stride_num_n > mprq_max_stride_num_n ||
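For reference, NS_PER_S / MS_PER_S in the fallback branch above evaluates to 1,000,000 kHz, i.e. a 1 GHz device clock. Below is a simplified, standalone restatement of the detection decision, assuming those macro values match DPDK's definitions (illustration only, not the driver code):

#define NS_PER_S 1000000000 /* nanoseconds per second */
#define MS_PER_S 1000       /* milliseconds per second */

/*
 * Illustration only: prefer the MTUTC timestamp mode when the register is
 * readable; otherwise assume the real-time format when the device clock
 * runs at 1 GHz (1,000,000 kHz).
 */
static int
rt_timestamp_detect(int mtutc_read_ok, int ts_mode_is_real_time,
		    unsigned int dev_freq_khz)
{
	if (mtutc_read_ok)
		return ts_mode_is_real_time;
	return dev_freq_khz == NS_PER_S / MS_PER_S; /* 1 GHz */
}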


@@ -216,6 +216,7 @@ struct mlx5_dev_config {
unsigned int devx:1; /* Whether devx interface is available or not. */
unsigned int dest_tir:1; /* Whether advanced DR API is available. */
unsigned int reclaim_mode:2; /* Memory reclaim mode. */
unsigned int rt_timestamp:1; /* realtime timestamp format. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
unsigned int stride_num_n; /* Number of strides. */


@@ -2972,3 +2972,27 @@ mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
priv->drop_queue.hrxq = NULL;
}
}
/**
* Set the Rx queue timestamp conversion parameters
*
* @param[in] dev
* Pointer to the Ethernet device structure.
*/
void
mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_rxq_data *data;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
if (!(*priv->rxqs)[i])
continue;
data = (*priv->rxqs)[i];
data->sh = sh;
data->rt_timestamp = priv->config.rt_timestamp;
}
}


@@ -1389,7 +1389,11 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
}
if (rxq->hw_timestamp) {
pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
if (rxq->rt_timestamp)
ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
pkt->timestamp = ts;
pkt->ol_flags |= PKT_RX_TIMESTAMP;
}
}
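After this change the datapath publishes either the raw or the converted timestamp transparently, so an application consumes it the same way in both cases. A minimal consumption sketch against the mbuf API of this release (rte_mbuf::timestamp and PKT_RX_TIMESTAMP), assuming an already configured port and Rx queue:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustration only: print the Rx timestamps of one burst and free the mbufs. */
static void
print_rx_timestamps(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	uint16_t i;

	for (i = 0; i < nb; i++) {
		if (pkts[i]->ol_flags & PKT_RX_TIMESTAMP)
			printf("packet %u: timestamp %" PRIu64 "\n",
			       i, pkts[i]->timestamp);
		rte_pktmbuf_free(pkts[i]);
	}
}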


@@ -109,6 +109,7 @@ enum mlx5_rxq_err_state {
struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int rt_timestamp:1; /* Realtime timestamp format. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
@@ -148,6 +149,7 @@ struct mlx5_rxq_data {
struct rte_mempool *mp;
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
uint16_t idx; /* Queue index. */
struct mlx5_rxq_stats stats;
rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
@@ -442,6 +444,8 @@ struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
/* mlx5_txq.c */


@@ -1024,14 +1024,32 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
pkts[pos]->timestamp =
rte_be_to_cpu_64(cq[pos].timestamp);
pkts[pos + 1]->timestamp =
rte_be_to_cpu_64(cq[pos + p1].timestamp);
pkts[pos + 2]->timestamp =
rte_be_to_cpu_64(cq[pos + p2].timestamp);
pkts[pos + 3]->timestamp =
rte_be_to_cpu_64(cq[pos + p3].timestamp);
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
uint64_t ts;
ts = rte_be_to_cpu_64(cq[pos].timestamp);
pkts[pos]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
pkts[pos + 1]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
pkts[pos + 2]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
pkts[pos + 3]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
} else {
pkts[pos]->timestamp = rte_be_to_cpu_64
(cq[pos].timestamp);
pkts[pos + 1]->timestamp = rte_be_to_cpu_64
(cq[pos + p1].timestamp);
pkts[pos + 2]->timestamp = rte_be_to_cpu_64
(cq[pos + p2].timestamp);
pkts[pos + 3]->timestamp = rte_be_to_cpu_64
(cq[pos + p3].timestamp);
}
}
if (rxq->dynf_meta) {
uint64_t flag = rxq->flow_meta_mask;
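The same per-packet pattern (byte-swap, then convert when rt_timestamp is set) repeats four times here and again in the other vectorized Rx paths below. A hypothetical helper capturing that pattern, using only the rxq fields added by this patch (it is not part of the patch itself):

/*
 * Hypothetical helper, not part of the patch: load one CQE timestamp and
 * convert it when the real-time format is configured on the queue.
 */
static inline uint64_t
mlx5_rx_cqe_ts(struct mlx5_rxq_data *rxq, uint64_t cqe_ts)
{
	uint64_t ts = rte_be_to_cpu_64(cqe_ts);

	return rxq->rt_timestamp ? mlx5_txpp_convert_rx_ts(rxq->sh, ts) : ts;
}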


@@ -694,22 +694,44 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
if (rxq->hw_timestamp) {
elts[pos]->timestamp =
rte_be_to_cpu_64(
container_of(p0, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 1]->timestamp =
rte_be_to_cpu_64(
container_of(p1, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 2]->timestamp =
rte_be_to_cpu_64(
container_of(p2, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 3]->timestamp =
rte_be_to_cpu_64(
container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
uint64_t ts;
ts = rte_be_to_cpu_64
(container_of(p0, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64
(container_of(p1, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 1]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64
(container_of(p2, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 2]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64
(container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 3]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
} else {
elts[pos]->timestamp = rte_be_to_cpu_64
(container_of(p0, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 1]->timestamp = rte_be_to_cpu_64
(container_of(p1, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 2]->timestamp = rte_be_to_cpu_64
(container_of(p2, struct mlx5_cqe,
pkt_info)->timestamp);
elts[pos + 3]->timestamp = rte_be_to_cpu_64
(container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
}
}
if (!!rxq->flow_meta_mask) {
/* This code is subject for further optimization. */


@@ -655,14 +655,32 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
pkts[pos]->timestamp =
rte_be_to_cpu_64(cq[pos].timestamp);
pkts[pos + 1]->timestamp =
rte_be_to_cpu_64(cq[pos + p1].timestamp);
pkts[pos + 2]->timestamp =
rte_be_to_cpu_64(cq[pos + p2].timestamp);
pkts[pos + 3]->timestamp =
rte_be_to_cpu_64(cq[pos + p3].timestamp);
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
uint64_t ts;
ts = rte_be_to_cpu_64(cq[pos].timestamp);
pkts[pos]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
pkts[pos + 1]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
pkts[pos + 2]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
pkts[pos + 3]->timestamp =
mlx5_txpp_convert_rx_ts(sh, ts);
} else {
pkts[pos]->timestamp = rte_be_to_cpu_64
(cq[pos].timestamp);
pkts[pos + 1]->timestamp = rte_be_to_cpu_64
(cq[pos + p1].timestamp);
pkts[pos + 2]->timestamp = rte_be_to_cpu_64
(cq[pos + p2].timestamp);
pkts[pos + 3]->timestamp = rte_be_to_cpu_64
(cq[pos + p3].timestamp);
}
}
if (rxq->dynf_meta) {
/* This code is subject for further optimization. */


@@ -329,9 +329,11 @@ mlx5_dev_start(struct rte_eth_dev *dev)
dev->data->port_id);
goto error;
}
/* Set a mask and offset of dynamic metadata flows into Rx queues*/
/* Set a mask and offset of dynamic metadata flows into Rx queues. */
mlx5_flow_rxq_dynf_metadata_set(dev);
/* Set a mask and offset of scheduling on timestamp into Tx queues*/
/* Set flags and context to convert Rx timestamps. */
mlx5_rxq_timestamp_set(dev);
/* Set a mask and offset of scheduling on timestamp into Tx queues. */
mlx5_txq_dynf_timestamp_set(dev);
/*
* In non-cached mode, it only needs to start the default mreg copy