eventdev: move inline APIs into separate structure

Move fastpath inline function pointers from rte_eventdev into a
separate structure accessed via a flat array.
The intention is to make rte_eventdev and related structures private
to avoid future API/ABI breakages.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
Pavan Nikhilesh, 2021-10-19 05:05:59 +05:30; committed by Jerin Jacob
parent 9c67fcbfd6
commit d35e61322d
8 changed files with 223 additions and 12 deletions
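
For context, the fastpath wrappers in rte_eventdev.h now index this flat array by device ID instead of dereferencing the now-private rte_eventdevs[]. A minimal sketch of the new call path (illustrative, not the verbatim library code; the sketch_ name is hypothetical):

#include <rte_eventdev.h>

/* Sketch: the public inline API resolves the per-device ops table and
 * the per-port data pointer, then tail-calls the PMD function.
 */
static inline uint16_t
sketch_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                     const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_event_fp_ops *fp_ops = &rte_event_fp_ops[dev_id];
        void *port = fp_ops->data[port_id];

        return fp_ops->enqueue_burst(port, ev, nb_events);
}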


@@ -435,6 +435,12 @@ ABI Changes
   were added in structure ``rte_event_eth_rx_adapter_stats`` to get additional
   status.

+* eventdev: A new structure ``rte_event_fp_ops`` has been added which is now used
+  by the fastpath inline functions. The structures ``rte_eventdev``,
+  ``rte_eventdev_data`` have been made internal. ``rte_eventdevs[]`` can't be
+  accessed directly by user any more. This change is transparent to both
+  applications and PMDs.
+
 Known Issues
 ------------
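
Since the public inline wrappers keep their signatures, application code is source-compatible, as the release note states; a sketch (app_tx and its arguments are hypothetical):

#include <rte_eventdev.h>

/* An application fastpath loop compiles unchanged: only the internals
 * behind rte_event_enqueue_burst() were rerouted to rte_event_fp_ops.
 */
static uint16_t
app_tx(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[],
       uint16_t nb_events)
{
        return rte_event_enqueue_burst(dev_id, port_id, ev, nb_events);
}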


@@ -1188,4 +1188,42 @@ __rte_internal
 int
 rte_event_pmd_release(struct rte_eventdev *eventdev);

+/**
+ *
+ * @internal
+ * This is the last step of device probing.
+ * It must be called after a port is allocated and initialized successfully.
+ *
+ * @param eventdev
+ *   New event device.
+ */
+__rte_internal
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev);
+
+/**
+ * Reset eventdevice fastpath APIs to dummy values.
+ *
+ * @param fp_ops
+ *   The *fp_ops* pointer to reset.
+ */
+__rte_internal
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op);
+
+/**
+ * Set eventdevice fastpath APIs to event device values.
+ *
+ * @param fp_ops
+ *   The *fp_ops* pointer to set.
+ */
+__rte_internal
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_ops,
+                     const struct rte_eventdev *dev);
+
 #ifdef __cplusplus
 }
 #endif

 #endif /* _RTE_EVENTDEV_PMD_H_ */


@@ -67,8 +67,10 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
 	/* Invoke PMD device initialization function */
 	retval = devinit(eventdev);
-	if (retval == 0)
+	if (retval == 0) {
+		event_dev_probing_finish(eventdev);
 		return 0;
+	}

 	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
 			" failed", pci_drv->driver.name,


@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "eventdev_pmd.h"
+#include "rte_eventdev.h"
+
+static uint16_t
+dummy_event_enqueue(__rte_unused void *port,
+		    __rte_unused const struct rte_event *ev)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_enqueue_burst(__rte_unused void *port,
+			  __rte_unused const struct rte_event ev[],
+			  __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
+		    __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_dequeue_burst(__rte_unused void *port,
+			  __rte_unused struct rte_event ev[],
+			  __rte_unused uint16_t nb_events,
+			  __rte_unused uint64_t timeout_ticks)
+{
+	RTE_EDEV_LOG_ERR(
+		"event dequeue burst requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue(__rte_unused void *port,
+			       __rte_unused struct rte_event ev[],
+			       __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_tx_adapter_enqueue_same_dest(__rte_unused void *port,
+					 __rte_unused struct rte_event ev[],
+					 __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event Tx adapter enqueue same destination requested for unconfigured event device");
+	return 0;
+}
+
+static uint16_t
+dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
+				   __rte_unused struct rte_event ev[],
+				   __rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event crypto adapter enqueue requested for unconfigured event device");
+	return 0;
+}
+
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
+{
+	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+	static const struct rte_event_fp_ops dummy = {
+		.enqueue = dummy_event_enqueue,
+		.enqueue_burst = dummy_event_enqueue_burst,
+		.enqueue_new_burst = dummy_event_enqueue_burst,
+		.enqueue_forward_burst = dummy_event_enqueue_burst,
+		.dequeue = dummy_event_dequeue,
+		.dequeue_burst = dummy_event_dequeue_burst,
+		.txa_enqueue = dummy_event_tx_adapter_enqueue,
+		.txa_enqueue_same_dest =
+			dummy_event_tx_adapter_enqueue_same_dest,
+		.ca_enqueue = dummy_event_crypto_adapter_enqueue,
+		.data = dummy_data,
+	};
+
+	*fp_op = dummy;
+}
+
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
+		     const struct rte_eventdev *dev)
+{
+	fp_op->enqueue = dev->enqueue;
+	fp_op->enqueue_burst = dev->enqueue_burst;
+	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
+	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
+	fp_op->dequeue = dev->dequeue;
+	fp_op->dequeue_burst = dev->dequeue_burst;
+	fp_op->txa_enqueue = dev->txa_enqueue;
+	fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
+	fp_op->ca_enqueue = dev->ca_enqueue;
+	fp_op->data = dev->data->ports;
+}
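
The dummy ops make a reset slot safe to call through while a device is unconfigured or being torn down; a hypothetical demo (fp_ops_reset_demo is not part of the patch):

#include "eventdev_pmd.h"

/* Demo: after a reset, every op is a logging stub, so a stray fastpath
 * call logs an error and returns 0 instead of jumping through a stale
 * pointer; the static dummy_data[] array keeps even the port-data
 * dereference within valid memory.
 */
static void
fp_ops_reset_demo(void)
{
        struct rte_event_fp_ops ops;

        event_dev_fp_ops_reset(&ops);
        (void)ops.enqueue_burst(ops.data[0], NULL, 0);
}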


@@ -8,24 +8,25 @@ else
 endif

 sources = files(
-        'rte_eventdev.c',
-        'rte_event_ring.c',
+        'eventdev_private.c',
         'eventdev_trace_points.c',
-        'rte_event_eth_rx_adapter.c',
-        'rte_event_timer_adapter.c',
         'rte_event_crypto_adapter.c',
+        'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_ring.c',
+        'rte_event_timer_adapter.c',
+        'rte_eventdev.c',
 )

 headers = files(
+        'rte_event_crypto_adapter.h',
+        'rte_event_eth_rx_adapter.h',
+        'rte_event_eth_tx_adapter.h',
+        'rte_event_ring.h',
+        'rte_event_timer_adapter.h',
+        'rte_event_timer_adapter_pmd.h',
         'rte_eventdev.h',
         'rte_eventdev_trace.h',
         'rte_eventdev_trace_fp.h',
-        'rte_event_ring.h',
-        'rte_event_eth_rx_adapter.h',
-        'rte_event_timer_adapter.h',
-        'rte_event_timer_adapter_pmd.h',
-        'rte_event_crypto_adapter.h',
-        'rte_event_eth_tx_adapter.h',
 )

 indirect_headers += files(
         'rte_eventdev_core.h',


@@ -46,6 +46,9 @@ static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
 };

+/* Public fastpath APIs. */
+struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 /* Event dev north bound API implementation */

 uint8_t
@@ -300,8 +303,8 @@ int
 rte_event_dev_configure(uint8_t dev_id,
 			const struct rte_event_dev_config *dev_conf)
 {
-	struct rte_eventdev *dev;
 	struct rte_event_dev_info info;
+	struct rte_eventdev *dev;
 	int diag;

 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -470,10 +473,13 @@ rte_event_dev_configure(uint8_t dev_id,
 		return diag;
 	}

+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
+
 	/* Configure the device */
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
 		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 		event_dev_queue_config(dev, 0);
 		event_dev_port_config(dev, 0);
 	}
@@ -1244,6 +1250,8 @@ rte_event_dev_start(uint8_t dev_id)
 	else
 		return diag;

+	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
+
 	return 0;
 }
@@ -1284,6 +1292,7 @@ rte_event_dev_stop(uint8_t dev_id)
 	dev->data->dev_started = 0;
 	(*dev->dev_ops->dev_stop)(dev);
 	rte_eventdev_trace_stop(dev_id);
+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 }
@@ -1302,6 +1311,7 @@ rte_event_dev_close(uint8_t dev_id)
 		return -EBUSY;
 	}

+	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 	rte_eventdev_trace_close(dev_id);
 	return (*dev->dev_ops->dev_close)(dev);
 }
@@ -1435,6 +1445,7 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	if (eventdev == NULL)
 		return -EINVAL;

+	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
 	eventdev->attached = RTE_EVENTDEV_DETACHED;
 	eventdev_globals.nb_devs--;
@@ -1460,6 +1471,15 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
 	return 0;
 }

+void
+event_dev_probing_finish(struct rte_eventdev *eventdev)
+{
+	if (eventdev == NULL)
+		return;
+
+	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
+			     eventdev);
+}
+
 static int
 handle_dev_list(const char *cmd __rte_unused,


@@ -39,6 +39,32 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
 		uint16_t nb_events);
 /**< @internal Enqueue burst of events on crypto adapter */

+struct rte_event_fp_ops {
+	void **data;
+	/**< points to array of internal port data pointers */
+	event_enqueue_t enqueue;
+	/**< PMD enqueue function. */
+	event_enqueue_burst_t enqueue_burst;
+	/**< PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< PMD enqueue burst new function. */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< PMD enqueue burst fwd function. */
+	event_dequeue_t dequeue;
+	/**< PMD dequeue function. */
+	event_dequeue_burst_t dequeue_burst;
+	/**< PMD dequeue burst function. */
+	event_tx_adapter_enqueue_t txa_enqueue;
+	/**< PMD Tx adapter enqueue function. */
+	event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+	/**< PMD Tx adapter enqueue same destination function. */
+	event_crypto_adapter_enqueue_t ca_enqueue;
+	/**< PMD Crypto adapter enqueue function. */
+	uintptr_t reserved[6];
+} __rte_cache_aligned;
+
+extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
 /**< @internal Max length of name of event PMD */
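
The reserved tail looks sized so the structure spans exactly two 64-byte cache lines on 64-bit targets, keeping the hot pointers tightly packed; a back-of-the-envelope check (an assumption about intent, not stated in the patch):

/* 1 data pointer + 9 function pointers + 6 reserved uintptr_t slots
 * = 16 pointer-sized fields = 128 bytes on LP64, i.e. two cache lines,
 * and __rte_cache_aligned anchors the struct on a line boundary.
 */
_Static_assert(sizeof(void *) * (1 + 9 + 6) == 128,
               "rte_event_fp_ops fills two 64-byte cache lines on LP64");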


@@ -85,6 +85,9 @@ DPDK_22 {
 	rte_event_timer_cancel_burst;
 	rte_eventdevs;

+	# added in 21.11
+	rte_event_fp_ops;
+
 	local: *;
 };
@@ -143,6 +146,9 @@ EXPERIMENTAL {
 INTERNAL {
 	global:

+	event_dev_fp_ops_reset;
+	event_dev_fp_ops_set;
+	event_dev_probing_finish;
 	rte_event_pmd_selftest_seqn_dynfield_offset;
 	rte_event_pmd_allocate;
 	rte_event_pmd_get_named_dev;