net/cnxk: enable PTP for event Rx adapter

Add support to enable PTP per Ethernet device when that
device is connected to the event device via the Rx adapter.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Pavan Nikhilesh, 2022-09-12 18:43:54 +05:30; committed by Jerin Jacob
parent d826133ae8
commit f1cdb3c5b6
8 changed files with 82 additions and 51 deletions
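For context, a minimal application-side sketch of the flow this patch enables
(illustrative, not part of the commit: the helper name setup_ptp_rx_adapter and
the queue counts are assumptions; the rte_* calls are standard DPDK APIs). PTP
is requested per ethdev through the Rx timestamp offload, and the port is then
attached to the event device through the Rx adapter:

#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>

static int
setup_ptp_rx_adapter(uint16_t port_id, uint8_t evdev_id, uint8_t adptr_id,
		     struct rte_event_port_conf *evp_conf)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf = {
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
	};
	struct rte_eth_conf conf = { 0 };
	int rc;

	/* PTP is enabled per ethdev via the Rx timestamp offload; the
	 * cnxk_sso_tstamp_cfg() helper added below keys off this flag.
	 */
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	rc = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (rc < 0)
		return rc;

	rc = rte_event_eth_rx_adapter_create(adptr_id, evdev_id, evp_conf);
	if (rc < 0)
		return rc;

	/* -1 adds every Rx queue of the port to the adapter. */
	return rte_event_eth_rx_adapter_queue_add(adptr_id, port_id, -1, &qconf);
}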


@@ -161,14 +161,15 @@ roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
 {
 	volatile const __uint128_t *src128 = (const __uint128_t *)in;
 	volatile __uint128_t *dst128 = (__uint128_t *)out;
+	uint32_t i;
 
 	dst128[0] = src128[0];
 	dst128[1] = src128[1];
 	/* lmtext receives following value:
 	 * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
 	 */
-	if (lmtext)
-		dst128[2] = src128[2];
+	for (i = 0; i < lmtext; i++)
+		dst128[2 + i] = src128[2 + i];
 }
 
 static __plt_always_inline void

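The roc_lmt_mov() change above turns a copy of at most one extra sub-descriptor
into a loop over lmtext of them, which is what the Tx timestamp case needs. A
hedged worked example of what the loop moves (illustration, not commit code):

/* lmtext == 0: copy 32 B (two 16 B LMT words, no extension)
 * lmtext == 1: copy 48 B (+ NIX_SUBDC_EXT, e.g. the Tx VLAN case)
 * lmtext == 2: copy 64 B (+ NIX_SUBDC_EXT and, presumably, the SEND_MEM
 *              sub-descriptor carrying the Tx PTP timestamp address)
 */
for (i = 0; i < lmtext; i++)
	dst128[2 + i] = src128[2 + i];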

@@ -694,8 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
-		       void *tstmp_info)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -703,7 +702,7 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
 	for (i = 0; i < dev->nb_event_ports; i++) {
 		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
 		ws->lookup_mem = lookup_mem;
-		ws->tstamp = tstmp_info;
+		ws->tstamp = dev->tstamp;
 	}
 }
 
@@ -715,7 +714,6 @@ cn10k_sso_rx_adapter_queue_add(
 {
 	struct cn10k_eth_rxq *rxq;
 	void *lookup_mem;
-	void *tstmp_info;
 	int rc;
 
 	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
@@ -728,8 +726,7 @@ cn10k_sso_rx_adapter_queue_add(
 		return -EINVAL;
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	tstmp_info = rxq->tstamp;
-	cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+	cn10k_sso_set_priv_mem(event_dev, lookup_mem);
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;

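Dropping the tstmp_info argument is the point of this hunk: the old flow copied
a single timestamp pointer out of Rx queue 0 of whichever port was added last,
so with several ports one port's PTP state silently overwrote another's. A
hedged before/after sketch of the work-slot field (types from this patch):

/* Before: one shared pointer, rewritten on every queue_add(); last port wins. */
struct cnxk_timesync_info *tstamp;

/* After: one slot per ethdev port, filled only for PTP-enabled ports by
 * cnxk_sso_tstamp_cfg() (added later in this patch); NULL means PTP off.
 */
struct cnxk_timesync_info **tstamp;	/* use: tstamp[port_id] */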

@@ -108,12 +108,29 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id,
 			      mbuf_init | ((uint64_t)port_id) << 48, flags);
 }
 
+static void
+cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+			 struct cnxk_timesync_info *tstamp)
+{
+	uint64_t tstamp_ptr;
+	uint8_t laptr;
+
+	laptr = (uint8_t) *
+		(uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+	if (laptr == sizeof(uint64_t)) {
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+					 (uint64_t *)tstamp_ptr);
+	}
+}
+
 static __rte_always_inline void
 cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 		   void *lookup_mem, void *tstamp, uintptr_t lbase)
 {
-	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
-			     (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
 	struct rte_event_vector *vec;
 	uint64_t aura_handle, laddr;
 	uint16_t nb_mbufs, non_vec;
@@ -133,6 +150,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
 		rte_prefetch0(&vec->ptrs[i]);
 
+	if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
+		mbuf_init |= 8;
+
 	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
 	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
 					      flags | NIX_RX_VWQE_F, lookup_mem,
@@ -158,7 +178,6 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 	while (non_vec) {
 		struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
-		uint64_t tstamp_ptr;
 
 		mbuf = (struct rte_mbuf *)((char *)cqe -
 					   sizeof(struct rte_mbuf));
@@ -178,12 +197,10 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 		cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
 				      mbuf_init, flags);
-		/* Extracting tstamp, if PTP enabled*/
-		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
-					   CNXK_SSO_WQE_SG_PTR);
-		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
-					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
-					 (uint64_t *)tstamp_ptr);
+
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+			cn10k_sso_process_tstamp((uint64_t)wqe[0],
+						 (uint64_t)mbuf, tstamp);
 		wqe[0] = (struct rte_mbuf *)mbuf;
 		non_vec--;
 		wqe++;
@@ -200,8 +217,6 @@ static __rte_always_inline void
 cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			   const uint32_t flags)
 {
-	uint64_t tstamp_ptr;
-
 	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
 		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
 	if ((flags & CPT_RX_WQE_F) &&
@@ -246,12 +261,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
 		cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
 				  ws->lookup_mem);
-		/* Extracting tstamp, if PTP enabled*/
-		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
-					   CNXK_SSO_WQE_SG_PTR);
-		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
-					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
-					 (uint64_t *)tstamp_ptr);
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+			cn10k_sso_process_tstamp(u64[1], mbuf,
+						 ws->tstamp[port]);
 		u64[1] = mbuf;
 	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
 		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
@@ -262,7 +274,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
 		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
 		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
-				   ws->tstamp, ws->lmt_base);
+				   ws->tstamp[port], ws->lmt_base);
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);

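A hedged reading of the mbuf_init change in cn10k_process_vwqe() above: the low
bits of mbuf_init carry rte_mbuf::data_off, so the extra 8 B of headroom that
hold the raw timestamp are now reserved at run time, and only when the port
really handed in a tstamp pointer, instead of unconditionally whenever the
TSTAMP_F fast-path flavor was compiled in:

uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;

/* tstamp is ws->tstamp[port]; NULL when the port did not enable
 * RTE_ETH_RX_OFFLOAD_TIMESTAMP.
 */
if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
	mbuf_init |= 8;	/* data_off += 8: skip the stored timestamp */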

@@ -123,7 +123,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
 	uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
-	struct cnxk_timesync_info *tstamp;
+	struct cnxk_timesync_info **tstamp;
 	struct cn9k_sso_hws_dual *dws;
 	struct cn9k_sso_hws *ws;
 	uint64_t cq_ds_cnt = 1;
@@ -942,8 +942,7 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
-		      void *tstmp_info)
+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -953,11 +952,11 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
 			struct cn9k_sso_hws_dual *dws =
 				event_dev->data->ports[i];
 			dws->lookup_mem = lookup_mem;
-			dws->tstamp = tstmp_info;
+			dws->tstamp = dev->tstamp;
 		} else {
 			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
 			ws->lookup_mem = lookup_mem;
-			ws->tstamp = tstmp_info;
+			ws->tstamp = dev->tstamp;
 		}
 	}
 }
@@ -970,7 +969,6 @@ cn9k_sso_rx_adapter_queue_add(
 {
 	struct cn9k_eth_rxq *rxq;
 	void *lookup_mem;
-	void *tstmp_info;
 	int rc;
 
 	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
@@ -984,8 +982,7 @@ cn9k_sso_rx_adapter_queue_add(
 
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	tstmp_info = rxq->tstamp;
-	cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+	cn9k_sso_set_priv_mem(event_dev, lookup_mem);
 	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;


@@ -169,13 +169,29 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
 			     mbuf_init | ((uint64_t)port_id) << 48, flags);
 }
 
+static void
+cn9k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+			struct cnxk_timesync_info *tstamp)
+{
+	uint64_t tstamp_ptr;
+	uint8_t laptr;
+
+	laptr = (uint8_t) *
+		(uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+	if (laptr == sizeof(uint64_t)) {
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+					(uint64_t *)tstamp_ptr);
+	}
+}
+
 static __rte_always_inline void
 cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 			  const void *const lookup_mem,
-			  struct cnxk_timesync_info *tstamp)
+			  struct cnxk_timesync_info **tstamp)
 {
-	uint64_t tstamp_ptr;
-
 	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
 		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
 	if ((flags & CPT_RX_WQE_F) &&
@@ -187,12 +203,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
 		cn9k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
 				 lookup_mem);
-		/* Extracting tstamp, if PTP enabled*/
-		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
-					   CNXK_SSO_WQE_SG_PTR);
-		cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
-					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
-					 (uint64_t *)tstamp_ptr);
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
 	}
 }
@@ -298,7 +310,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 static __rte_always_inline uint16_t
 cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
 			    const uint32_t flags, void *lookup_mem,
-			    struct cnxk_timesync_info *tstamp)
+			    struct cnxk_timesync_info **tstamp)
 {
 	union {
 		__uint128_t get_work;


@@ -38,6 +38,7 @@
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 #define CNXK_SSO_WQE_SG_PTR    (9)
+#define CNXK_SSO_WQE_LAYR_PTR  (5)
 #define CNXK_SSO_PRIORITY_CNT  (0x8)
 #define CNXK_SSO_WEIGHT_MAX    (0x3f)
 #define CNXK_SSO_WEIGHT_MIN    (0x3)
@@ -123,6 +124,7 @@ struct cnxk_sso_evdev {
 	uint64_t *timer_adptr_sz;
 	uint16_t vec_pool_cnt;
 	uint64_t *vec_pools;
+	struct cnxk_timesync_info *tstamp[RTE_MAX_ETHPORTS];
 	struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/* Dev args */
 	uint32_t xae_cnt;
@@ -140,12 +142,12 @@ struct cnxk_sso_evdev {
 struct cn10k_sso_hws {
 	uint64_t base;
 	uint64_t gw_rdata;
-	/* PTP timestamp */
-	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
 	uint32_t gw_wdata;
 	uint8_t swtag_req;
 	uint8_t hws_id;
+	/* PTP timestamp */
+	struct cnxk_timesync_info **tstamp;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
@@ -160,11 +162,11 @@ struct cn10k_sso_hws {
 struct cn9k_sso_hws {
 	uint64_t base;
 	uint64_t gw_wdata;
-	/* PTP timestamp */
-	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t hws_id;
+	/* PTP timestamp */
+	struct cnxk_timesync_info **tstamp;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
@@ -177,12 +179,12 @@ struct cn9k_sso_hws {
 struct cn9k_sso_hws_dual {
 	uint64_t base[2]; /* Ping and Pong */
 	uint64_t gw_wdata;
-	/* PTP timestamp */
-	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t vws; /* Ping pong bit */
 	uint8_t hws_id;
+	/* PTP timestamp */
+	struct cnxk_timesync_info **tstamp;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;

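Note the sharing scheme these declarations set up: the per-port table lives
once in cnxk_sso_evdev, and every work slot (cn10k_sso_hws, cn9k_sso_hws,
cn9k_sso_hws_dual) merely points at it, so a later cnxk_sso_tstamp_cfg() call
is visible to all slots without touching them again. A small illustrative
lookup (hypothetical helper, not commit code):

static inline struct cnxk_timesync_info *
hws_tstamp_lookup(struct cn10k_sso_hws *ws, uint16_t port)
{
	/* ws->tstamp aliases cnxk_sso_evdev::tstamp. */
	return ws->tstamp[port];	/* NULL => PTP off on this port */
}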

@@ -207,6 +207,14 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
 	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
 }
 
+static void
+cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev,
+		    struct cnxk_sso_evdev *dev)
+{
+	if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		dev->tstamp[port_id] = &cnxk_eth_dev->tstamp;
+}
+
 int
 cnxk_sso_rx_adapter_queue_add(
 	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -255,6 +263,7 @@ cnxk_sso_rx_adapter_queue_add(
 		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 				      rxq_sp->qconf.mp->pool_id, true,
 				      dev->force_ena_bp, rxq_sp->tc);
+		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
 


@@ -1567,7 +1567,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 					   ol_flags3, mbuf3);
 		}
 
-		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
+		if ((flags & NIX_RX_OFFLOAD_TSTAMP_F) &&
+		    ((flags & NIX_RX_VWQE_F) && tstamp)) {
 			const uint16x8_t len_off = {
 				0, /* ptype 0:15 */
 				0, /* ptype 16:32 */
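Finally, a hedged note on the reworked guard above: in the vector path the
timestamp block now also requires a valid tstamp pointer, so a VWQE burst from
a port without PTP skips it even when the TSTAMP_F flavor is compiled in. An
illustrative evaluation of the condition:

/* TSTAMP_F  VWQE_F  tstamp    timestamp block
 *    0        x       x       skipped (resolved at compile time)
 *    1        0       x       skipped (non-VWQE invocation)
 *    1        1      NULL     skipped (port has PTP disabled)
 *    1        1      valid    executed
 */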