event/octeontx: support Tx adapter

Add Tx adapter support and move a few routines around to avoid code
duplication.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
This commit is contained in:
Pavan Nikhilesh 2018-09-28 17:11:33 +05:30 committed by Jerin Jacob
parent 37d291c033
commit 1dedffeba7
7 changed files with 162 additions and 35 deletions

View File

@ -17,7 +17,7 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx
LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
LDLIBS += -lrte_bus_vdev
LDLIBS += -lrte_bus_vdev -lrte_ethdev
EXPORT_MAP := rte_pmd_octeontx_event_version.map

View File

@ -146,6 +146,7 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
dev->enqueue_forward_burst = ssows_enq_fwd_burst;
dev->dequeue = ssows_deq;
dev->dequeue_burst = ssows_deq_burst;
dev->txa_enqueue = sso_event_tx_adapter_enqueue;
if (edev->is_timeout_deq) {
dev->dequeue = ssows_deq_timeout;
@ -491,6 +492,77 @@ ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
return 0;
}
/*
 * Report the event Tx adapter capabilities for @eth_dev.
 *
 * Only octeontx ethdevs (device name prefixed "eth_octeontx") pair with
 * this SSO PMD's internal Tx port; every other device reports no
 * capabilities so the generic (software) Tx adapter path is used.
 *
 * Always returns 0; the capability mask is delivered through *caps.
 */
static int
ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	/* sizeof(literal) - 1 is the literal's length; avoids the magic 12. */
	ret = strncmp(eth_dev->data->name, "eth_octeontx",
		      sizeof("eth_octeontx") - 1);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}
/*
 * Tx adapter create hook: nothing to allocate — the adapter is serviced
 * by the device's internal port (see caps_get above reporting
 * RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT). No-op, always succeeds.
 */
static int
ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	return 0;
}
/*
 * Tx adapter free hook: mirror of the create hook — no per-adapter state
 * was allocated, so there is nothing to release. No-op, always succeeds.
 */
static int
ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	return 0;
}
/*
 * Tx adapter queue-add hook: the enqueue path (sso_event_tx_adapter_enqueue)
 * resolves the target txq from each mbuf at runtime, so no per-queue
 * bookkeeping is needed here. No-op, always succeeds.
 */
static int
ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);

	return 0;
}
/*
 * Tx adapter queue-del hook: nothing was recorded at queue-add time, so
 * there is nothing to undo. No-op, always succeeds.
 */
static int
ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);

	return 0;
}
/*
 * Tx adapter start hook: the internal-port datapath needs no explicit
 * start — enqueue works as soon as the device is running. No-op.
 */
static int
ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	return 0;
}
/*
 * Tx adapter stop hook: counterpart of the start hook; nothing to quiesce
 * at the adapter level. No-op, always succeeds.
 */
static int
ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	return 0;
}
static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
@ -619,6 +691,14 @@ static struct rte_eventdev_ops ssovf_ops = {
.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
.eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get,
.eth_tx_adapter_create = ssovf_eth_tx_adapter_create,
.eth_tx_adapter_free = ssovf_eth_tx_adapter_free,
.eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add,
.eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del,
.eth_tx_adapter_start = ssovf_eth_tx_adapter_start,
.eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop,
.timer_adapter_caps_get = ssovf_timvf_caps_get,
.dev_selftest = test_eventdev_octeontx,

View File

@ -5,6 +5,7 @@
#ifndef __SSOVF_EVDEV_H__
#define __SSOVF_EVDEV_H__
#include <rte_event_eth_tx_adapter.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_io.h>
@ -179,6 +180,8 @@ typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
ssows_handle_event_t fn, void *arg);
void ssows_reset(struct ssows *ws);
uint16_t sso_event_tx_adapter_enqueue(void *port,
struct rte_event ev[], uint16_t nb_events);
int ssovf_info(struct ssovf_info *info);
void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
int test_eventdev_octeontx(void);

View File

@ -261,3 +261,47 @@ ssows_reset(struct ssows *ws)
ssows_swtag_untag(ws);
}
}
/*
 * Internal-port Tx adapter enqueue: transmit the mbuf carried by the
 * first event through the octeontx PKO.
 *
 * Only ev[0] is handled and nb_events is ignored, so the caller is
 * expected to submit events one at a time (single-event bursts).
 *
 * Returns 1 when the packet was handed to the hardware, 0 when the
 * destination queue is backpressured (__octeontx_xmit_pkts < 0).
 */
uint16_t
sso_event_tx_adapter_enqueue(void *port,
		struct rte_event ev[], uint16_t nb_events)
{
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mbuf *m;
	struct rte_eth_dev *ethdev;
	struct ssows *ws = port;
	struct octeontx_txq *txq;
	octeontx_dq_t *dq;

	RTE_SET_USED(nb_events);

	/*
	 * Bring the event into ATOMIC scheduling before transmit; the
	 * rte_cio_wmb() orders prior stores ahead of the tag switch.
	 * NOTE(review): presumably this preserves per-flow packet order
	 * for ORDERED/UNTAGGED flows — confirm against SSO HRM.
	 */
	switch (ev->sched_type) {
	case SSO_SYNC_ORDERED:
		/* ORDERED -> ATOMIC tag switch, then wait for completion. */
		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
		rte_cio_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_UNTAGGED:
		/* UNTAGGED needs a full tag switch carrying the wqe (u64). */
		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
				ev->queue_id);
		rte_cio_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_ATOMIC:
		/* Already atomic: only the store barrier is required. */
		rte_cio_wmb();
		break;
	}

	/* Resolve the destination descriptor queue from the mbuf itself. */
	m = ev[0].mbuf;
	port_id = m->port;
	queue_id = rte_event_eth_tx_adapter_txq_get(m);
	ethdev = &rte_eth_devices[port_id];
	txq = ethdev->data->tx_queues[queue_id];
	dq = &txq->dq;

	if (__octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va, dq->fc_status_va,
				m) < 0)
		return 0;

	return 1;
}

View File

@ -42,6 +42,7 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
mbuf->ol_flags = 0;
mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
rte_mbuf_refcnt_set(mbuf, 1);
return mbuf;
}

View File

@ -19,40 +19,6 @@
#include "octeontx_rxtx.h"
#include "octeontx_logs.h"
static __rte_always_inline uint16_t __hot
__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
struct rte_mbuf *tx_pkt)
{
uint64_t cmd_buf[4];
uint16_t gaura_id;
if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
return -ENOSPC;
/* Get the gaura Id */
gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);
/* Setup PKO_SEND_HDR_S */
cmd_buf[0] = tx_pkt->data_len & 0xffff;
cmd_buf[1] = 0x0;
/* Set don't free bit if reference count > 1 */
if (rte_mbuf_refcnt_read(tx_pkt) > 1)
cmd_buf[0] |= (1ULL << 58); /* SET DF */
/* Setup PKO_SEND_GATHER_S */
cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
PKO_SEND_GATHER_LDTYPE(0x1ull) |
PKO_SEND_GATHER_GAUAR((long)gaura_id) |
tx_pkt->data_len;
octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
return 0;
}
uint16_t __hot
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{

View File

@ -100,6 +100,39 @@ ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
};
static __rte_always_inline int
__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
struct rte_mbuf *tx_pkt)
{
uint64_t cmd_buf[4] __rte_cache_aligned;
uint16_t gaura_id;
if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
return -ENOSPC;
/* Get the gaura Id */
gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
/* Setup PKO_SEND_HDR_S */
cmd_buf[0] = tx_pkt->data_len & 0xffff;
cmd_buf[1] = 0x0;
/* Set don't free bit if reference count > 1 */
if (rte_mbuf_refcnt_read(tx_pkt) > 1)
cmd_buf[0] |= (1ULL << 58); /* SET DF */
/* Setup PKO_SEND_GATHER_S */
cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
PKO_SEND_GATHER_LDTYPE(0x1ull) |
PKO_SEND_GATHER_GAUAR((long)gaura_id) |
tx_pkt->data_len;
octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
return 0;
}
uint16_t
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);