numam-dpdk/drivers/event/octeontx/ssovf_worker.h
Pavan Nikhilesh d0d6549860 net/octeontx: support event Rx adapter
Add functions to modify and delete the QoS entries responsible for mapping
ethernet queues to event queues, used when configuring the event Rx adapter.
The mbox functions have been moved from octeontx_pkivf.c to
octeontx_pkivf.h to allow event_octeontx to access them.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
2017-10-25 14:03:43 +02:00

/*
 * BSD LICENSE
 *
 * Copyright (C) Cavium, Inc. 2017.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Cavium, Inc nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <octeontx_mbox.h>

#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"
enum {
	SSO_SYNC_ORDERED,
	SSO_SYNC_ATOMIC,
	SSO_SYNC_UNTAGGED,
	SSO_SYNC_EMPTY
};

#ifndef __hot
#define __hot __attribute__((hot))
#endif

/* SSO Operations */
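
/*
 * Convert a PKI work-queue entry (WQE) delivered by get_work into an
 * rte_mbuf. The mbuf header is assumed to sit OCTTX_PACKET_WQE_SKIP bytes
 * before the WQE in the same packet buffer; packet type, length, and data
 * offset are recovered from the WQE words.
 */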
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
{
	struct rte_mbuf *mbuf;
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

	rte_prefetch_non_temporal(wqe);

	/* Get mbuf from wqe */
	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	mbuf->packet_type =
		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
	mbuf->pkt_len = wqe->s.w1.len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->port = port_id;
	rte_mbuf_refcnt_set(mbuf, 1);

	return mbuf;
}
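
/*
 * Pull the next event from the workslot's GET_WORK address pair.
 * get_work0 carries the 32-bit tag plus the tag type and group in bits
 * [43:32]; shifting that field left by 38 places it over the sched_type
 * and queue_id fields of ev->event. get_work1 holds the WQE pointer (or
 * raw u64 payload for non-ethdev events); zero means no work was
 * available. For ethdev events, the Rx port id is recovered from bits
 * [23:20] of the event word.
 */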
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev)
{
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;

	ssovf_load_pair(get_work0, get_work1, ws->getwork);

	sched_type_queue = (get_work0 >> 32) & 0xfff;
	ws->cur_tt = sched_type_queue & 0x3;
	ws->cur_grp = sched_type_queue >> 2;
	sched_type_queue = sched_type_queue << 38;
	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
				(ev->event >> 20) & 0xF);
	} else {
		ev->u64 = get_work1;
	}

	return !!get_work1;
}
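
/*
 * Inject a new event into the SSO: the tag and tag type go in the first
 * word and the event/WQE pointer in the second, store-paired to the
 * ADD_WORK address of the target group.
 */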
static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t add_work0;

	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}
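
/*
 * Full tag switch: change the tag, tag type, and group of the current
 * event and update its event/WQE pointer in a single store pair.
 */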
static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t swtag_full0;

	swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
				((uint64_t)grp << 34);
	ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
				SSOW_VHWS_OP_SWTAG_FULL0));
}
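
/*
 * Switch the tag and deschedule: the event gives up this workslot and is
 * re-queued to the given group for later rescheduling.
 */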
static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}
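
/*
 * Normal tag switch: change only the tag and tag type; the event stays in
 * its current group.
 */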
static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}
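
/*
 * Drop the current tag, moving the event to UNTAGGED and releasing any
 * ordering or atomicity it held; cur_tt mirrors the state in software.
 */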
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}
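
/*
 * Update the event/WQE pointer (and group) associated with the work held
 * by this workslot, without switching the tag.
 */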
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
	ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
				SSOW_VHWS_OP_UPD_WQP_GRP0));
}
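
/* Deschedule the current event without switching its tag. */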
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}
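
/*
 * Spin until any pending tag switch completes: SSOW_VHWS_SWTP reads
 * non-zero while a SWTAG/SWTAG_FULL is still in flight.
 */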
static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
	/* Wait for the SWTAG/SWTAG_FULL operation */
	while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
		;
}