event/octeontx2: add Tx adapter

Add event eth Tx adapter support to octeontx2 SSO.

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Pavan Nikhilesh 2019-07-04 07:49:39 +05:30 committed by Jerin Jacob
parent 8980a15300
commit 9c0a9024be
6 changed files with 310 additions and 0 deletions
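
With this patch the driver reports RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT, so an application transmits by enqueueing mbuf events straight to the adapter instead of routing them through a Tx service core. Below is a minimal, hedged sketch of the application-side flow under that capability; the identifiers (adapter_id, dev_id, eth_port_id, ev_port_id, port_conf) are illustrative assumptions, not part of this commit, and the call shown is the 19.08-era rte_event_eth_tx_adapter_enqueue() signature (no flags argument):

#include <rte_event_eth_tx_adapter.h>

static int
txa_setup_and_send(uint8_t adapter_id, uint8_t dev_id, uint16_t eth_port_id,
                   uint8_t ev_port_id, struct rte_event_port_conf *port_conf,
                   struct rte_event *ev)
{
        uint32_t caps = 0;

        /* Ask the eventdev PMD whether it can transmit from the event
         * port itself (the capability this commit implements). */
        rte_event_eth_tx_adapter_caps_get(dev_id, eth_port_id, &caps);

        rte_event_eth_tx_adapter_create(adapter_id, dev_id, port_conf);
        /* -1: bind every Tx queue, the tx_queue_id < 0 path added below. */
        rte_event_eth_tx_adapter_queue_add(adapter_id, eth_port_id, -1);
        rte_event_eth_tx_adapter_start(adapter_id);

        if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
                /* Internal port: this call lands in the txa_enqueue
                 * fast-path ops wired up below. */
                return rte_event_eth_tx_adapter_enqueue(dev_id, ev_port_id,
                                                        ev, 1);

        return 0;
}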

drivers/event/octeontx2/otx2_evdev.c

@@ -168,6 +168,39 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue ssogws_tx_adptr_enq[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags) \
        [f4][f3][f2][f1][f0] = otx2_ssogws_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_tx_adptr_enq_seg[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags) \
        [f4][f3][f2][f1][f0] = otx2_ssogws_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_dual_tx_adptr_enq[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags) \
        [f4][f3][f2][f1][f0] = otx2_ssogws_dual_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags) \
        [f4][f3][f2][f1][f0] = \
                        otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        event_dev->enqueue = otx2_ssogws_enq;
        event_dev->enqueue_burst = otx2_ssogws_enq_burst;
        event_dev->enqueue_new_burst = otx2_ssogws_enq_new_burst;
@@ -238,6 +271,23 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
                }
        }

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
                /* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
                event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
        } else {
                event_dev->txa_enqueue = ssogws_tx_adptr_enq
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
        }

        if (dev->dual_ws) {
                event_dev->enqueue = otx2_ssogws_dual_enq;
                event_dev->enqueue_burst = otx2_ssogws_dual_enq_burst;
@@ -352,6 +402,31 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
                                        NIX_RX_OFFLOAD_RSS_F)];
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
                        /* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
                        event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
                } else {
                        event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                                [!!(dev->tx_offloads &
                                    NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
                }
        }

        rte_mb();
}
@@ -1413,6 +1488,10 @@ static struct rte_eventdev_ops otx2_sso_ops = {
        .eth_rx_adapter_start = otx2_sso_rx_adapter_start,
        .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = otx2_tim_caps_get,

        .xstats_get = otx2_sso_xstats_get,

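The function tables added above turn the five Tx offload flags into array indices: each flag is squashed to 0 or 1 with !!, so sso_fastpath_fns_set() selects a fully specialized enqueue function once at configuration time instead of testing flags per packet. A self-contained sketch of the same technique with two flags; all names here are invented for illustration:

#include <stdio.h>

#define FLAG_CSUM  (1 << 0)
#define FLAG_VLAN  (1 << 1)

static void tx_plain(void)     { printf("plain\n"); }
static void tx_csum(void)      { printf("csum\n"); }
static void tx_vlan(void)      { printf("vlan\n"); }
static void tx_vlan_csum(void) { printf("vlan+csum\n"); }

/* [VLAN][CSUM], mirroring ssogws_tx_adptr_enq[f4][f3][f2][f1][f0]. */
static void (*const tx_fns[2][2])(void) = {
        [0][0] = tx_plain,      [0][1] = tx_csum,
        [1][0] = tx_vlan,       [1][1] = tx_vlan_csum,
};

int main(void)
{
        unsigned int offloads = FLAG_CSUM | FLAG_VLAN;

        /* !! squashes any set bit to exactly 1, as in the code above. */
        tx_fns[!!(offloads & FLAG_VLAN)][!!(offloads & FLAG_CSUM)]();
        return 0;
}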
drivers/event/octeontx2/otx2_evdev.h

@@ -8,6 +8,7 @@
#include <rte_eventdev.h>
#include <rte_eventdev_pmd.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include "otx2_common.h"
#include "otx2_dev.h"
@@ -21,6 +22,7 @@
#define OTX2_SSO_MAX_VHGRP RTE_EVENT_MAX_QUEUES_PER_DEV
#define OTX2_SSO_MAX_VHWS (UINT8_MAX)
#define OTX2_SSO_FC_NAME "otx2_evdev_xaq_fc"
#define OTX2_SSO_SQB_LIMIT (0x180)
#define OTX2_SSO_XAQ_SLACK (8)
#define OTX2_SSO_XAQ_CACHE_CNT (0x7)
@@ -133,6 +135,7 @@ struct otx2_sso_evdev {
        rte_iova_t fc_iova;
        struct rte_mempool *xaq_pool;
        uint64_t rx_offloads;
        uint64_t tx_offloads;
        uint16_t rx_adptr_pool_cnt;
        uint32_t adptr_xae_cnt;
        uint64_t *rx_adptr_pools;
@@ -323,6 +326,22 @@ uint16_t otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R

#define T(name, f4, f3, f2, f1, f0, sz, flags) \
uint16_t otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[], \
                                           uint16_t nb_events); \
uint16_t otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, \
                                               struct rte_event ev[], \
                                               uint16_t nb_events); \
uint16_t otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
                                                struct rte_event ev[], \
                                                uint16_t nb_events); \
uint16_t otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
                                                    struct rte_event ev[], \
                                                    uint16_t nb_events);

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
                      uint32_t event_type);
int sso_xae_reconfigure(struct rte_eventdev *event_dev);
@@ -342,6 +361,19 @@ int otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev);
int otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev);
int otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                                 const struct rte_eth_dev *eth_dev,
                                 uint32_t *caps);
int otx2_sso_tx_adapter_queue_add(uint8_t id,
                                  const struct rte_eventdev *event_dev,
                                  const struct rte_eth_dev *eth_dev,
                                  int32_t tx_queue_id);
int otx2_sso_tx_adapter_queue_del(uint8_t id,
                                  const struct rte_eventdev *event_dev,
                                  const struct rte_eth_dev *eth_dev,
                                  int32_t tx_queue_id);

/* Clean up APIs */
typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,

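The T()/SSO_TX_ADPTR_ENQ_FASTPATH_FUNC pairing above is an X-macro: one list macro is re-expanded under different definitions of T() to emit the prototypes here, the function bodies in the worker sources, and the dispatch-table entries in sso_fastpath_fns_set(), so all of them stay in sync from a single list. A toy, self-contained version of the pattern (names invented for illustration):

#include <stdio.h>

/* One list, many expansions: adding an entry here grows the prototype
 * list, the definitions and the table below automatically. */
#define TX_FASTPATH_FUNC \
T(nocsum, 0) \
T(csum, 1)

/* Expansion 1: prototypes (what the header above does). */
#define T(name, f0) static void tx_##name(void);
TX_FASTPATH_FUNC
#undef T

/* Expansion 2: definitions (what the worker .c files do). */
#define T(name, f0) static void tx_##name(void) { printf(#name "\n"); }
TX_FASTPATH_FUNC
#undef T

/* Expansion 3: dispatch table (what sso_fastpath_fns_set() does). */
static void (*const tx_fns[2])(void) = {
#define T(name, f0) [f0] = tx_##name,
TX_FASTPATH_FUNC
#undef T
};

int main(void) { tx_fns[1](); return 0; }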
drivers/event/octeontx2/otx2_evdev_adptr.c

@@ -349,3 +349,89 @@ otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
        return 0;
}
int
otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}
static int
sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
        struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
        struct npa_aq_enq_req *aura_req;

        aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
        aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
        aura_req->ctype = NPA_AQ_CTYPE_AURA;
        aura_req->op = NPA_AQ_INSTOP_WRITE;

        aura_req->aura.limit = nb_sqb_bufs;
        aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);

        return otx2_mbox_process(npa_lf->mbox);
}
int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_eth_txq *txq;
        int i;

        RTE_SET_USED(id);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                                OTX2_SSO_SQB_LIMIT);
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
        }

        dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
        sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}
int
otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_txq *txq;
        int i;

        RTE_SET_USED(id);
        RTE_SET_USED(event_dev);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                                txq->nb_sqb_bufs);
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
        }

        return 0;
}
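
sso_sqb_aura_limit_edit() above uses the NPA mailbox write-with-mask convention: the request carries both a context value and a mask, and only the fields whose mask bits are set get modified, which is why aura_mask.limit is flipped to all ones. (Capping the SQB aura at OTX2_SSO_SQB_LIMIT while a queue is bound, and restoring txq->nb_sqb_bufs on delete, presumably bounds the send descriptors in flight through the SSO.) A hedged, generic illustration of the idiom; the struct below is an invented stand-in for the real NPA aura context:

#include <stdint.h>

/* Invented miniature of a context/mask pair as sent over the mailbox. */
struct aura_ctx { uint64_t limit; uint64_t ena; };

static void
ctx_write_limit(struct aura_ctx *ctx, struct aura_ctx *mask, uint64_t limit)
{
        ctx->limit = limit;             /* new value for the one field */
        mask->limit = ~(uint64_t)0;     /* mask bits set: update this field */
        mask->ena = 0;                  /* mask clear: leave untouched */
}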

drivers/event/octeontx2/otx2_worker.c

@@ -267,6 +267,35 @@ otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
        return 1;
}

#define T(name, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __hot \
otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[], \
                                  uint16_t nb_events) \
{ \
        struct otx2_ssogws *ws = port; \
        uint64_t cmd[sz]; \
\
        RTE_SET_USED(nb_events); \
        return otx2_ssogws_event_tx(ws, ev, cmd, flags); \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

#define T(name, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __hot \
otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[], \
                                      uint16_t nb_events) \
{ \
        struct otx2_ssogws *ws = port; \
        uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
\
        RTE_SET_USED(nb_events); \
        return otx2_ssogws_event_tx(ws, ev, cmd, (flags) | \
                                    NIX_TX_MULTI_SEG_F); \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

void
ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id, uintptr_t base,
                    otx2_handle_event_t fn, void *arg)

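Each generated enqueue above ignores nb_events, transmits ev[0] only and returns 1, so a caller handing the adapter a burst must treat the return value as a partial-enqueue count and retry the remainder, as with any eventdev enqueue. A hedged caller-side sketch (the helper name is assumed, and a real application would bound the retry loop):

#include <rte_event_eth_tx_adapter.h>

static void
txa_enqueue_all(uint8_t dev_id, uint8_t ev_port_id,
                struct rte_event ev[], uint16_t nb)
{
        uint16_t sent = 0;

        /* Retry until the whole burst has been accepted. */
        while (sent < nb)
                sent += rte_event_eth_tx_adapter_enqueue(dev_id, ev_port_id,
                                                         ev + sent,
                                                         nb - sent);
}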
drivers/event/octeontx2/otx2_worker.h

@@ -219,4 +219,53 @@ otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
#endif
}

static __rte_always_inline void
otx2_ssogws_head_wait(struct otx2_ssogws *ws, const uint8_t wait_flag)
{
        while (wait_flag && !(otx2_read64(ws->tag_op) & BIT_ULL(35)))
                ;

        rte_cio_wmb();
}

static __rte_always_inline const struct otx2_eth_txq *
otx2_ssogws_xtract_meta(struct rte_mbuf *m)
{
        return rte_eth_devices[m->port].data->tx_queues[
                        rte_event_eth_tx_adapter_txq_get(m)];
}

static __rte_always_inline void
otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
                        uint64_t *cmd, const uint32_t flags)
{
        otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
        otx2_nix_xmit_prepare(m, cmd, flags);
}

static __rte_always_inline uint16_t
otx2_ssogws_event_tx(struct otx2_ssogws *ws, struct rte_event ev[],
                     uint64_t *cmd, const uint32_t flags)
{
        struct rte_mbuf *m = ev[0].mbuf;
        const struct otx2_eth_txq *txq = otx2_ssogws_xtract_meta(m);

        otx2_ssogws_head_wait(ws, !ev->sched_type);
        otx2_ssogws_prepare_pkt(txq, m, cmd, flags);

        if (flags & NIX_TX_MULTI_SEG_F) {
                const uint16_t segdw = otx2_nix_prepare_mseg(m, cmd, flags);

                otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
                                             m->ol_flags, segdw, flags);
                otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr, segdw);
        } else {
                /* Pass the number of segdw as 4: HDR + EXT + SG + SMEM */
                otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
                                             m->ol_flags, 4, flags);
                otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr, flags);
        }

        return 1;
}

#endif
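
otx2_ssogws_event_tx() recovers its destination from metadata the application stamped on the mbuf (m->port plus the adapter's per-mbuf Tx queue field), and otx2_ssogws_head_wait() spins on bit 35 of the tag register only when !ev->sched_type is true, i.e. for RTE_SCHED_TYPE_ORDERED (value 0) events, so ordered flows leave in order while atomic and parallel events transmit immediately. A hedged sketch of the matching application side; all identifiers are assumed:

#include <rte_event_eth_tx_adapter.h>
#include <rte_mbuf.h>

static void
stamp_and_send(struct rte_mbuf *m, uint16_t eth_port_id,
               uint16_t tx_queue_id, uint8_t dev_id, uint8_t ev_port_id)
{
        struct rte_event ev = { .event = 0 };

        m->port = eth_port_id;          /* read back via m->port above */
        /* Read back via rte_event_eth_tx_adapter_txq_get() above. */
        rte_event_eth_tx_adapter_txq_set(m, tx_queue_id);

        ev.mbuf = m;
        ev.event_type = RTE_EVENT_TYPE_CPU;
        ev.sched_type = RTE_SCHED_TYPE_ATOMIC;  /* non-zero: no head wait */

        rte_event_eth_tx_adapter_enqueue(dev_id, ev_port_id, &ev, 1);
}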

drivers/event/octeontx2/otx2_worker_dual.c

@@ -305,3 +305,38 @@ otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R

#define T(name, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __hot \
otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
                                       struct rte_event ev[], \
                                       uint16_t nb_events) \
{ \
        struct otx2_ssogws_dual *ws = port; \
        struct otx2_ssogws *vws = \
                (struct otx2_ssogws *)&ws->ws_state[!ws->vws]; \
        uint64_t cmd[sz]; \
\
        RTE_SET_USED(nb_events); \
        return otx2_ssogws_event_tx(vws, ev, cmd, flags); \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

#define T(name, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __hot \
otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
                                           struct rte_event ev[], \
                                           uint16_t nb_events) \
{ \
        struct otx2_ssogws_dual *ws = port; \
        struct otx2_ssogws *vws = \
                (struct otx2_ssogws *)&ws->ws_state[!ws->vws]; \
        uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
\
        RTE_SET_USED(nb_events); \
        return otx2_ssogws_event_tx(vws, ev, cmd, (flags) | \
                                    NIX_TX_MULTI_SEG_F); \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
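
In these dual-workslot variants, ws->vws points at the work slot the next dequeue will poll, so ws_state[!ws->vws] is the slot the event currently being processed arrived on; the head/tag state that otx2_ssogws_event_tx() consults presumably has to come from that slot. A minimal sketch of the toggle itself, under that assumed semantics:

#include <stdio.h>

/* Invented miniature of the dual-slot bookkeeping. */
struct dual_ws {
        int vws;                /* slot polled on the next dequeue */
        const char *ws_state[2];
};

int main(void)
{
        struct dual_ws ws = { .vws = 1, .ws_state = { "slot0", "slot1" } };

        /* The event just dequeued came from the other slot. */
        printf("tx uses %s\n", ws.ws_state[!ws.vws]);
        return 0;
}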