event/octeontx: create and free timer adapter

When the application requests creation of a timer adapter, the Octeontx TIM
create path does the following (a usage sketch follows the commit metadata
below):
- Get the requested TIMvf ring based on the adapter_id.
- Verify the config parameters supplied.
- Allocate the memory required:
  * buckets, sized from the timer tick and maximum timeout supplied;
  * the chunk pool, sized from the number of timers.
- Clear the interrupts.

On Free:
- Free the allocated bucket and chunk memory.
- Free private data used by TIMvf.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Pavan Nikhilesh 2018-04-10 02:30:27 +05:30 committed by Thomas Monjalon
parent fd5baf09cd
commit f874c1eb15
6 changed files with 310 additions and 1 deletion
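For context, a minimal usage sketch of the path this commit enables, written against the public event timer adapter API. It assumes event device 0 is already configured and started and is backed by the octeontx ssovf PMD; the helper name and the numeric values are illustrative only, not requirements of the driver.

#include <rte_lcore.h>
#include <rte_event_timer_adapter.h>

static struct rte_event_timer_adapter *
setup_timvf_adapter(void)
{
        const struct rte_event_timer_adapter_conf conf = {
                .event_dev_id = 0,            /* eventdev backed by ssovf */
                .timer_adapter_id = 0,        /* selects the TIMvf ring */
                .socket_id = rte_socket_id(),
                .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, /* TIM_CLK_SRC_SCLK */
                .timer_tick_ns = 10 * 1000 * 1000,          /* 10 ms resolution */
                .max_tmo_ns = 10ULL * 1000 * 1000 * 1000,   /* 10 s max timeout */
                .nb_timers = 64 * 1024,
                .flags = 0,
        };

        /* Invokes the driver's timvf_ring_create() op added below. */
        return rte_event_timer_adapter_create(&conf);
}

Teardown later goes through rte_event_timer_adapter_free(), which ends up in the driver's timvf_ring_free() op.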

doc/guides/eventdevs/octeontx.rst

@@ -113,3 +113,19 @@ Rx adapter support

When eth_octeontx is used as Rx adapter, event schedule type
``RTE_SCHED_TYPE_PARALLEL`` is not supported.

Event timer adapter support
~~~~~~~~~~~~~~~~~~~~~~~~~~~

When timvf is used as an event timer adapter, the clock source mapping is as
follows:

.. code-block:: console

   RTE_EVENT_TIMER_ADAPTER_CPU_CLK  = TIM_CLK_SRC_SCLK
   RTE_EVENT_TIMER_ADAPTER_EXT_CLK0 = TIM_CLK_SRC_GPIO
   RTE_EVENT_TIMER_ADAPTER_EXT_CLK1 = TIM_CLK_SRC_GTI
   RTE_EVENT_TIMER_ADAPTER_EXT_CLK2 = TIM_CLK_SRC_PTP

When timvf is used as an event timer adapter, event schedule type
``RTE_SCHED_TYPE_PARALLEL`` is not supported.
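Selecting one of the external clocks above is done purely through the adapter configuration; a minimal sketch, assuming a usable clock source is actually wired to the board's GPIO input (helper name illustrative):

#include <rte_event_timer_adapter.h>

/* Illustrative: request the GPIO-driven clock, which timvf maps to
 * TIM_CLK_SRC_GPIO per the table above. */
static void
use_gpio_clock(struct rte_event_timer_adapter_conf *conf)
{
        conf->clk_src = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0;
        /* For non-CPU clock sources, timvf_ring_create() rejects ticks below
         * TIM_MIN_INTERVAL (1000 ns). */
        conf->timer_tick_ns = 100 * 1000; /* 100 us */
}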

drivers/event/octeontx/Makefile

@@ -13,6 +13,7 @@ CFLAGS += $(WERROR_FLAGS)
CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx
LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
@@ -29,6 +30,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_evdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_probe.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays

drivers/event/octeontx/meson.build

@@ -4,7 +4,10 @@
sources = files('ssovf_worker.c',
'ssovf_evdev.c',
'ssovf_evdev_selftest.c',
'ssovf_probe.c'
'ssovf_probe.c',
'timvf_evdev.c',
'timvf_probe.c'
)
allow_experimental_apis = true
deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx']

drivers/event/octeontx/ssovf_evdev.c

@@ -18,6 +18,7 @@
#include <rte_bus_vdev.h>
#include "ssovf_evdev.h"
#include "timvf_evdev.h"
int otx_logtype_ssovf;
@@ -601,6 +602,13 @@ ssovf_selftest(const char *key __rte_unused, const char *value,
return 0;
}
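/* Glue between the eventdev layer and the TIM VF code: report timer adapter
 * capabilities with statistics disabled (enable_stats = 0). */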
static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
{
return timvf_timer_adapter_caps_get(dev, flags, caps, ops, 0);
}
/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops ssovf_ops = {
.dev_infos_get = ssovf_info_get,
@@ -621,6 +629,8 @@ static struct rte_eventdev_ops ssovf_ops = {
.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
.timer_adapter_caps_get = ssovf_timvf_caps_get,
.dev_selftest = test_eventdev_octeontx,
.dump = ssovf_dump,

drivers/event/octeontx/timvf_evdev.c

@@ -0,0 +1,146 @@
/*
* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Cavium, Inc
*/
#include "timvf_evdev.h"
int otx_logtype_timvf;
RTE_INIT(otx_timvf_init_log);
static void
otx_timvf_init_log(void)
{
otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
if (otx_logtype_timvf >= 0)
rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
}
static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
struct rte_event_timer_adapter_info *adptr_info)
{
struct timvf_ring *timr = adptr->data->adapter_priv;
adptr_info->max_tmo_ns = timr->max_tout;
adptr_info->min_resolution_ns = timr->tck_nsec;
rte_memcpy(&adptr_info->conf, &adptr->data->conf,
sizeof(struct rte_event_timer_adapter_conf));
}
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
char pool_name[25];
int ret;
uint64_t nb_timers;
struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
struct timvf_ring *timr;
struct timvf_info tinfo;
const char *mempool_ops;
if (timvf_info(&tinfo) < 0)
return -ENODEV;
if (adptr->data->id >= tinfo.total_timvfs)
return -ENODEV;
timr = rte_zmalloc("octeontx_timvf_priv",
sizeof(struct timvf_ring), 0);
if (timr == NULL)
return -ENOMEM;
adptr->data->adapter_priv = timr;
/* Check config parameters. */
if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
(!rcfg->timer_tick_ns ||
rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
timvf_log_err("Too low timer ticks");
goto cfg_err;
}
timr->clk_src = (int) rcfg->clk_src;
timr->tim_ring_id = adptr->data->id;
timr->tck_nsec = rcfg->timer_tick_ns;
timr->max_tout = rcfg->max_tmo_ns;
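/* One bucket per timer tick across the requested timeout window. */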
timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
nb_timers = rcfg->nb_timers;
timr->get_target_bkt = bkt_mod;
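/* Size the chunk pool from the requested timer count; each chunk carries
 * nb_chunk_slots entries. */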
timr->nb_chunks = nb_timers / nb_chunk_slots;
timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
0);
if (timr->bkt == NULL)
goto mem_err;
snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d", timr->tim_ring_id);
timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
0);
if (!timr->chunk_pool) {
timvf_log_err("Unable to create chunkpool.");
goto mem_err;
}
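/* Use the platform's preferred mbuf mempool ops for the chunk pool. */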
mempool_ops = rte_mbuf_best_mempool_ops();
ret = rte_mempool_set_ops_byname(timr->chunk_pool,
mempool_ops, NULL);
if (ret != 0) {
timvf_log_err("Unable to set chunkpool ops.");
goto mem_err;
}
ret = rte_mempool_populate_default(timr->chunk_pool);
if (ret < 0) {
timvf_log_err("Unable to set populate chunkpool.");
goto mem_err;
}
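/* Reset the ring base and the no-response-error (NRSPERR) interrupt state,
 * per the "Clear the interrupts" step in the commit log. */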
timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);
return 0;
mem_err:
rte_mempool_free(timr->chunk_pool);
rte_free(timr->bkt);
rte_free(timr);
return -ENOMEM;
cfg_err:
rte_free(timr);
return -EINVAL;
}
static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
struct timvf_ring *timr = adptr->data->adapter_priv;
rte_mempool_free(timr->chunk_pool);
rte_free(timr->bkt);
rte_free(adptr->data->adapter_priv);
return 0;
}
static struct rte_event_timer_adapter_ops timvf_ops = {
.init = timvf_ring_create,
.uninit = timvf_ring_free,
.get_info = timvf_ring_info_get,
};
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
uint8_t enable_stats)
{
RTE_SET_USED(dev);
RTE_SET_USED(flags);
RTE_SET_USED(enable_stats);
*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
*ops = &timvf_ops;
return 0;
}
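For reference, a hedged sketch of how an application observes this capability from the other side of the eventdev API, assuming dev_id identifies the octeontx ssovf event device:

#include <stdbool.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>

static bool
timer_adapter_has_internal_port(uint8_t dev_id)
{
        uint32_t caps = 0;

        /* Routed to ssovf_timvf_caps_get() -> timvf_timer_adapter_caps_get(). */
        if (rte_event_timer_adapter_caps_get(dev_id, &caps) < 0)
                return false;

        /* INTERNAL_PORT: the TIM block injects expiry events into the SSO by
         * itself, so no service core is needed to feed the event device. */
        return caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
}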

drivers/event/octeontx/timvf_evdev.h

@@ -7,6 +7,22 @@
#define __TIMVF_EVDEV_H__
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>
#include <rte_event_timer_adapter_pmd.h>
#include <rte_io.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_prefetch.h>
#include <rte_reciprocal.h>
#include <octeontx_mbox.h>
@@ -20,14 +36,129 @@
#define timvf_log_err(fmt, ...) timvf_log(ERR, fmt, ##__VA_ARGS__)
#define timvf_func_trace timvf_log_dbg
#define TIM_COPROC (8)
#define TIM_GET_DEV_INFO (1)
#define TIM_GET_RING_INFO (2)
#define TIM_SET_RING_INFO (3)
#define TIM_RING_START_CYC_GET (4)
#define TIM_MAX_RINGS (64)
#define TIM_DEV_PER_NODE (1)
#define TIM_VF_PER_DEV (64)
#define TIM_RING_PER_DEV (TIM_VF_PER_DEV)
#define TIM_RING_NODE_SHIFT (6)
#define TIM_RING_MASK ((TIM_RING_PER_DEV) - 1)
#define TIM_RING_INVALID (-1)
#define TIM_MIN_INTERVAL (1E3)
#define TIM_MAX_INTERVAL ((1ull << 32) - 1)
#define TIM_MAX_BUCKETS (1ull << 20)
#define TIM_CHUNK_SIZE (4096)
#define TIM_MAX_CHUNKS_PER_BUCKET (1ull << 32)
#define TIMVF_MAX_BURST (8)
/* TIM VF Control/Status registers (CSRs): */
/* VF_BAR0: */
#define TIM_VF_NRSPERR_INT (0x0)
#define TIM_VF_NRSPERR_INT_W1S (0x8)
#define TIM_VF_NRSPERR_ENA_W1C (0x10)
#define TIM_VF_NRSPERR_ENA_W1S (0x18)
#define TIM_VRING_FR_RN_CYCLES (0x20)
#define TIM_VRING_FR_RN_GPIOS (0x28)
#define TIM_VRING_FR_RN_GTI (0x30)
#define TIM_VRING_FR_RN_PTP (0x38)
#define TIM_VRING_CTL0 (0x40)
#define TIM_VRING_CTL1 (0x50)
#define TIM_VRING_CTL2 (0x60)
#define TIM_VRING_BASE (0x100)
#define TIM_VRING_AURA (0x108)
#define TIM_VRING_REL (0x110)
#define timvf_read64 rte_read64_relaxed
#define timvf_write64 rte_write64_relaxed
extern int otx_logtype_timvf;
static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1;
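/* A 4 KB chunk holds (4096 / 16) - 1 = 255 16-byte tim_mem_entry slots; the
 * remaining slot is presumably reserved for linking to the next chunk. */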
struct timvf_info {
uint16_t domain; /* Domain id */
uint8_t total_timvfs; /* Total timvf available in domain */
};
enum timvf_clk_src {
TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
};
/* TIM_MEM_BUCKET */
struct tim_mem_bucket {
uint64_t first_chunk;
union {
uint64_t w1;
struct {
uint32_t nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
uint8_t lock;
int16_t chunk_remainder;
};
};
uint64_t current_chunk;
uint64_t pad;
} __rte_packed;
struct tim_mem_entry {
uint64_t w0;
uint64_t wqe;
} __rte_packed;
struct timvf_ctrl_reg {
uint64_t rctrl0;
uint64_t rctrl1;
uint64_t rctrl2;
uint8_t use_pmu;
} __rte_packed;
struct timvf_ring;
typedef uint32_t (*bkt_id)(const uint32_t bkt_tcks, const uint32_t nb_bkts);
typedef struct tim_mem_entry * (*refill_chunk)(
struct tim_mem_bucket * const bkt,
struct timvf_ring * const timr);
struct timvf_ring {
bkt_id get_target_bkt;
refill_chunk refill_chunk;
struct rte_reciprocal_u64 fast_div;
uint64_t ring_start_cyc;
uint32_t nb_bkts;
struct tim_mem_bucket *bkt;
void *chunk_pool;
uint64_t tck_int;
uint64_t tck_nsec;
void *vbar0;
void *bkt_pos;
uint64_t max_tout;
uint64_t nb_chunks;
enum timvf_clk_src clk_src;
uint16_t tim_ring_id;
} __rte_cache_aligned;
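/* Wrap a bucket index into [0, nb_bkts): e.g. with 1000 buckets, index 1150
 * maps to bucket 150. */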
static __rte_always_inline uint32_t
bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts)
{
return rel_bkt % nb_bkts;
}
int timvf_info(struct timvf_info *tinfo);
void *timvf_bar(uint8_t id, uint8_t bar);
int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
uint8_t enable_stats);
#endif /* __TIMVF_EVDEV_H__ */