event/octeontx: support multi-segment
Add support for multi-segment packets to the eventdev PMD.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
commit 844d302d73 (parent a5f30c925b)
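The _mseg fast paths added below are only selected when edev->rx_offload_flags / edev->tx_offload_flags carry the multi-segment bits, which ssovf_eth_rx_adapter_queue_add() now copies from the underlying net/octeontx port; those per-port flags are in turn derived from the ethdev Rx/Tx offloads, so an application opts in by enabling scattered Rx and multi-segment Tx on the port before attaching it to the event Rx/Tx adapters. A minimal sketch of that configuration, assuming hypothetical port/queue ids, descriptor counts and mempool, and using the RTE_ETH_* offload names from current DPDK (older releases spell them DEV_RX_OFFLOAD_SCATTER / DEV_TX_OFFLOAD_MULTI_SEGS):

#include <string.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative helper: port id, queue sizes and mempool are assumptions. */
static int
enable_mseg_offloads(uint16_t eth_port, struct rte_mempool *mp)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf;
	int ret;

	ret = rte_eth_dev_info_get(eth_port, &info);
	if (ret != 0)
		return ret;

	memset(&conf, 0, sizeof(conf));
	/* Scattered Rx lets the driver hand out chained (multi-seg) mbufs. */
	if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
	/* Multi-seg Tx lets the Tx adapter transmit chained mbufs. */
	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	ret = rte_eth_dev_configure(eth_port, 1, 1, &conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(eth_port, 0, 512,
			rte_eth_dev_socket_id(eth_port), NULL, mp);
	if (ret != 0)
		return ret;

	ret = rte_eth_tx_queue_setup(eth_port, 0, 512,
			rte_eth_dev_socket_id(eth_port), NULL);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(eth_port);
}

With both offloads enabled, ssovf_fastpath_fns_set() installs ssows_deq_mseg()/ssows_deq_burst_mseg() and sso_event_tx_adapter_enqueue_mseg(); otherwise the existing single-segment handlers remain in use.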
@@ -146,15 +146,31 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
 	dev->enqueue_burst = ssows_enq_burst;
 	dev->enqueue_new_burst = ssows_enq_new_burst;
 	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
-	dev->dequeue = ssows_deq;
-	dev->dequeue_burst = ssows_deq_burst;
-	dev->txa_enqueue = sso_event_tx_adapter_enqueue;
-	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 
-	if (edev->is_timeout_deq) {
-		dev->dequeue = ssows_deq_timeout;
-		dev->dequeue_burst = ssows_deq_timeout_burst;
+	if (!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)) {
+		dev->dequeue = ssows_deq_mseg;
+		dev->dequeue_burst = ssows_deq_burst_mseg;
+
+		if (edev->is_timeout_deq) {
+			dev->dequeue = ssows_deq_timeout_mseg;
+			dev->dequeue_burst = ssows_deq_timeout_burst_mseg;
+		}
+	} else {
+		dev->dequeue = ssows_deq;
+		dev->dequeue_burst = ssows_deq_burst;
+
+		if (edev->is_timeout_deq) {
+			dev->dequeue = ssows_deq_timeout;
+			dev->dequeue_burst = ssows_deq_timeout_burst;
+		}
 	}
+
+	if (!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F))
+		dev->txa_enqueue = sso_event_tx_adapter_enqueue_mseg;
+	else
+		dev->txa_enqueue = sso_event_tx_adapter_enqueue;
+
+	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 }
 
 static void
@@ -411,6 +427,7 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
 {
 	int ret = 0;
 	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
 	pki_mod_qos_t pki_qos;
 	RTE_SET_USED(dev);
 
@@ -447,6 +464,8 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
 		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
 				nic->port_id, queue_conf->ev.queue_id);
 
+	edev->rx_offload_flags = nic->rx_offload_flags;
+	edev->tx_offload_flags = nic->tx_offload_flags;
 	return ret;
 }
 
@@ -12,6 +12,8 @@
 #include <octeontx_mbox.h>
 #include <octeontx_ethdev.h>
 
+#include "octeontx_rxtx.h"
+
 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
 
 #define SSOVF_LOG(level, fmt, args...) \
@@ -132,6 +134,7 @@ enum ssovf_type {
 };
 
 struct ssovf_evdev {
+	OFFLOAD_FLAGS; /*Sequence should not be changed */
 	uint8_t max_event_queues;
 	uint8_t max_event_ports;
 	uint8_t is_timeout_deq;
@@ -175,6 +178,14 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
 		uint64_t timeout_ticks);
 uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_mseg(void *port, struct rte_event *ev,
+		uint64_t timeout_ticks);
+uint16_t ssows_deq_burst_mseg(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_mseg(void *port, struct rte_event *ev,
+		uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
 
 typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
 void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
@@ -182,6 +193,8 @@ void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 void ssows_reset(struct ssows *ws);
 uint16_t sso_event_tx_adapter_enqueue(void *port,
 		struct rte_event ev[], uint16_t nb_events);
+uint16_t sso_event_tx_adapter_enqueue_mseg(void *port,
+		struct rte_event ev[], uint16_t nb_events);
 int ssovf_info(struct ssovf_info *info);
 void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
 int test_eventdev_octeontx(void);
@@ -103,7 +103,7 @@ ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 		ssows_swtag_wait(ws);
 		return 1;
 	} else {
-		return ssows_get_work(ws, ev);
+		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 	}
 }
 
@@ -118,9 +118,9 @@ ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 		ws->swtag_req = 0;
 		ssows_swtag_wait(ws);
 	} else {
-		ret = ssows_get_work(ws, ev);
+		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-			ret = ssows_get_work(ws, ev);
+			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 	}
 	return ret;
 }
@@ -143,6 +143,61 @@ ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
 	return ssows_deq_timeout(port, ev, timeout_ticks);
 }
 
+__rte_always_inline uint16_t __rte_hot
+ssows_deq_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+	struct ssows *ws = port;
+
+	RTE_SET_USED(timeout_ticks);
+
+	if (ws->swtag_req) {
+		ws->swtag_req = 0;
+		ssows_swtag_wait(ws);
+		return 1;
+	} else {
+		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+				      OCCTX_RX_MULTI_SEG_F);
+	}
+}
+
+__rte_always_inline uint16_t __rte_hot
+ssows_deq_timeout_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+	struct ssows *ws = port;
+	uint64_t iter;
+	uint16_t ret = 1;
+
+	if (ws->swtag_req) {
+		ws->swtag_req = 0;
+		ssows_swtag_wait(ws);
+	} else {
+		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+				     OCCTX_RX_MULTI_SEG_F);
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
+			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+					     OCCTX_RX_MULTI_SEG_F);
+	}
+	return ret;
+}
+
+uint16_t __rte_hot
+ssows_deq_burst_mseg(void *port, struct rte_event ev[], uint16_t nb_events,
+		     uint64_t timeout_ticks)
+{
+	RTE_SET_USED(nb_events);
+
+	return ssows_deq_mseg(port, ev, timeout_ticks);
+}
+
+uint16_t __rte_hot
+ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
+			     uint16_t nb_events, uint64_t timeout_ticks)
+{
+	RTE_SET_USED(nb_events);
+
+	return ssows_deq_timeout_mseg(port, ev, timeout_ticks);
+}
+
 __rte_always_inline uint16_t __rte_hot
 ssows_enq(void *port, const struct rte_event *ev)
 {
@@ -231,7 +286,9 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
 		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
 			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-					(ev.event >> 20) & 0x7F);
+					(ev.event >> 20) & 0x7F,
+					OCCTX_RX_OFFLOAD_NONE |
+					OCCTX_RX_MULTI_SEG_F);
 		else
 			ev.u64 = get_work1;
 
@@ -262,9 +319,9 @@ ssows_reset(struct ssows *ws)
 	}
 }
 
-uint16_t
-sso_event_tx_adapter_enqueue(void *port,
-		struct rte_event ev[], uint16_t nb_events)
+static __rte_always_inline uint16_t
+__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+		uint16_t nb_events, const uint16_t flag)
 {
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -298,5 +355,22 @@ sso_event_tx_adapter_enqueue(void *port,
 	ethdev = &rte_eth_devices[port_id];
 	txq = ethdev->data->tx_queues[queue_id];
 
-	return __octeontx_xmit_pkts(txq, &m, 1, cmd, OCCTX_TX_OFFLOAD_NONE);
+	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
+}
+
+uint16_t
+sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+		uint16_t nb_events)
+{
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
+					      OCCTX_TX_OFFLOAD_NONE);
+}
+
+uint16_t
+sso_event_tx_adapter_enqueue_mseg(void *port, struct rte_event ev[],
+		uint16_t nb_events)
+{
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
+					      OCCTX_TX_OFFLOAD_NONE |
+					      OCCTX_TX_MULTI_SEG_F);
 }
@@ -19,8 +19,43 @@ enum {
 
 /* SSO Operations */
 
+static __rte_always_inline void
+ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
+			struct rte_mbuf *mbuf)
+{
+	octtx_pki_buflink_t *buflink;
+	rte_iova_t *iova_list;
+	uint8_t nb_segs;
+	uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;
+
+	nb_segs = wqe->s.w0.bufs;
+
+	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
+					  sizeof(octtx_pki_buflink_t));
+
+	while (--nb_segs) {
+		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
+		mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
+			      - (OCTTX_PACKET_LATER_SKIP / 128);
+		mbuf = mbuf->next;
+
+		mbuf->data_off = sizeof(octtx_pki_buflink_t);
+
+		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+		if (nb_segs == 1)
+			mbuf->data_len = bytes_left;
+		else
+			mbuf->data_len = buflink->w0.s.size;
+
+		bytes_left = bytes_left - buflink->w0.s.size;
+		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
+
+	}
+}
+
 static __rte_always_inline struct rte_mbuf *
-ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
+		const uint16_t flag)
 {
 	struct rte_mbuf *mbuf;
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
@@ -31,10 +66,18 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
 	mbuf->packet_type =
 		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
 	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-	mbuf->pkt_len = wqe->s.w1.len;
-	mbuf->data_len = mbuf->pkt_len;
-	mbuf->nb_segs = 1;
 	mbuf->ol_flags = 0;
+	mbuf->pkt_len = wqe->s.w1.len;
+
+	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
+		mbuf->nb_segs = wqe->s.w0.bufs;
+		mbuf->data_len = wqe->s.w5.size;
+		ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
+	} else {
+		mbuf->nb_segs = 1;
+		mbuf->data_len = mbuf->pkt_len;
+	}
+
 	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
 	rte_mbuf_refcnt_set(mbuf, 1);
 
@@ -45,14 +88,29 @@ static __rte_always_inline void
 ssovf_octeontx_wqe_free(uint64_t work)
 {
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
-	struct rte_mbuf *mbuf;
+	uint8_t nb_segs = wqe->s.w0.bufs;
+	octtx_pki_buflink_t *buflink;
+	struct rte_mbuf *mbuf, *head;
+	rte_iova_t *iova_list;
 
 	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
-	rte_pktmbuf_free(mbuf);
+	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
+					  sizeof(octtx_pki_buflink_t));
+	head = mbuf;
+	while (--nb_segs) {
+		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
+		mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
+			- (OCTTX_PACKET_LATER_SKIP / 128);
+
+		mbuf->next = NULL;
+		rte_pktmbuf_free(mbuf);
+		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
+	}
+	rte_pktmbuf_free(head);
 }
 
 static __rte_always_inline uint16_t
-ssows_get_work(struct ssows *ws, struct rte_event *ev)
+ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
 {
 	uint64_t get_work0, get_work1;
 	uint64_t sched_type_queue;
@@ -67,7 +125,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 
 	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
 		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-				(ev->event >> 20) & 0x7F);
+				(ev->event >> 20) & 0x7F, flag);
 	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
 		ssovf_octeontx_wqe_free(get_work1);
 		return 0;