eventdev: express DLB/DLB2 PMD constraints

This commit implements the eventdev ABI changes required by
the DLB/DLB2 PMDs.  Several data structures and constants are modified
or added in this patch, thereby requiring modifications to the
dependent apps and examples.

The DLB/DLB2 hardware does not conform exactly to the eventdev interface.
1) It has a limit on the number of queues that may be linked to a port.
2) Some ports are further restricted to a maximum of one linked queue.
3) DLB does not have the ability to carry the flow_id as part
   of the event (QE) payload. Note that the DLB2 hardware is capable of
   carrying the flow_id.
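
For devices that lack this capability, an application can test the new
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID flag and carry the flow ID out of band.
Below is a minimal sketch in the spirit of the app/test-eventdev changes
in this patch, which stash the flow ID in the mbuf's udata64 field; the
helper names are illustrative and not part of the patch.

    #include <stdbool.h>
    #include <rte_eventdev.h>
    #include <rte_mbuf.h>

    static bool
    dev_carries_flow_id(uint8_t dev_id)
    {
	struct rte_event_dev_info info;

	rte_event_dev_info_get(dev_id, &info);
	return info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
    }

    /* Producer: keep a copy of the flow ID in the mbuf in case the
     * device cannot carry it in the QE.
     */
    static void
    stash_flow_id(struct rte_event *ev, uint32_t flow)
    {
	ev->flow_id = flow;
	ev->mbuf->udata64 = flow;
    }

    /* Worker: restore the flow ID when the device dropped it. */
    static void
    restore_flow_id(struct rte_event *ev, bool flow_id_cap)
    {
	if (!flow_id_cap)
		ev->flow_id = ev->mbuf->udata64;
    }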

Following is a detailed description of the changes that have been made.

1) Add new fields to the rte_event_dev_info struct. These fields allow
the device to advertise its capabilities so that applications can take
the appropriate actions based on those capabilities.

    struct rte_event_dev_info {
	uint8_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a single event
	 * port by this device.
	 */

	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum number of event ports and queues that are optimized for
	 * (and only capable of) single-link configurations supported by this
	 * device. These ports and queues are not accounted for in
	 * max_event_ports or max_event_queues.
	 */
    }
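
As a usage sketch (dev_id and nb_queues_needed are placeholders), an
application can read these fields with rte_event_dev_info_get() and treat
the single-link pairs as capacity in addition to max_event_ports and
max_event_queues:

    #include <rte_common.h>
    #include <rte_eventdev.h>

    static void
    query_dev_limits(uint8_t dev_id, uint32_t nb_queues_needed)
    {
	struct rte_event_dev_info info;

	rte_event_dev_info_get(dev_id, &info);

	/* Single-link pairs come on top of the load-balanced resources. */
	uint32_t total_ports = info.max_event_ports +
		info.max_single_link_event_port_queue_pairs;
	uint32_t total_queues = info.max_event_queues +
		info.max_single_link_event_port_queue_pairs;

	/* Never link more queues to one port than the device allows. */
	uint32_t links_per_port = RTE_MIN(nb_queues_needed,
		(uint32_t)info.max_event_port_links);

	RTE_SET_USED(total_ports);
	RTE_SET_USED(total_queues);
	RTE_SET_USED(links_per_port);
    }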

2) Add a new field to the rte_event_dev_config struct. This field allows
the application to specify how many of its ports are limited to a single
link, or will be used in single link mode.

    /** Event device configuration structure */
    struct rte_event_dev_config {
	uint8_t nb_single_link_event_port_queues;
	/**< Number of event ports and queues that will be singly-linked to
	 * each other. These are a subset of the overall event ports and
	 * queues; this value cannot exceed *nb_event_ports* or
	 * *nb_event_queues*. If the device has ports and queues that are
	 * optimized for single-link usage, this field is a hint for how many
	 * to allocate; otherwise, regular event ports and queues can be used.
	 */
    }
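
A minimal configuration sketch (the counts and the decision to reserve
exactly one single-link pair, e.g. for a dedicated TX port, are
assumptions for illustration; error handling elided):

    #include <rte_eventdev.h>

    static int
    configure_evdev(uint8_t dev_id, uint8_t nb_queues, uint8_t nb_ports)
    {
	struct rte_event_dev_config config = {
		.dequeue_timeout_ns = 0,
		.nb_events_limit = 4096,
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128,
		/* One port/queue pair will only ever be singly linked. */
		.nb_single_link_event_port_queues = 1,
	};

	return rte_event_dev_configure(dev_id, &config);
    }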

3) Replace the dedicated disable_implicit_release field with a bit field
of explicit port configuration flags. The implicit release disable
functionality is assigned to one bit, and a port-is-single-link-only
attribute is assigned to another, with the remaining bits available for
future assignment.

	/* Event port configuration bitmap flags */
	#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
	/**< Configure the port not to release outstanding events in
	 * rte_event_dev_dequeue_burst(). If set, all events received through
	 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
	 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
	 */
	#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
	/**< This event port links only to a single event queue.
	 *
	 *  @see rte_event_port_setup(), rte_event_port_link()
	 */

	/**
	 * The implicit release disable attribute of the port
	 */
	#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

	struct rte_event_port_conf {
		uint32_t event_port_cfg;
		/**< Port cfg flags(EVENT_PORT_CFG_) */
	}
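
Putting the flags together, here is a hedged port-setup sketch: start
from the default configuration, request implicit-release disable only
when the device advertises the capability, mark one assumed dedicated
port (tx_port_id, illustrative) as single link, and read the setting back
via the new port attribute:

    #include <rte_eventdev.h>

    static int
    setup_ports(uint8_t dev_id, uint8_t nb_ports, uint8_t tx_port_id)
    {
	struct rte_event_dev_info info;
	struct rte_event_port_conf conf;
	uint32_t impl_rel_disabled;
	uint8_t p;
	int ret;

	rte_event_dev_info_get(dev_id, &info);

	for (p = 0; p < nb_ports; p++) {
		rte_event_port_default_conf_get(dev_id, p, &conf);

		conf.event_port_cfg = 0;
		if (info.event_dev_cap &
		    RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
		if (p == tx_port_id)
			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_SINGLE_LINK;

		ret = rte_event_port_setup(dev_id, p, &conf);
		if (ret < 0)
			return ret;
	}

	/* The implicit-release setting is exposed as a boolean attribute. */
	return rte_event_port_attr_get(dev_id, 0,
			RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
			&impl_rel_disabled);
    }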

This patch also removes the deprecation notice and announces the new
eventdev ABI changes in the release notes.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Acked-by: Harry van Haaren <harry.van.haaren@intel.com>
Acked-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>

@ -104,6 +104,16 @@ evt_has_all_types_queue(uint8_t dev_id)
true : false;
}
static inline bool
evt_has_flow_id(uint8_t dev_id)
{
struct rte_event_dev_info dev_info;
rte_event_dev_info_get(dev_id, &dev_info);
return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
true : false;
}
static inline int
evt_service_setup(uint32_t service_id)
{
@ -169,6 +179,7 @@ evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
.dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_single_link_event_port_queues = 0,
.nb_events_limit = info.max_num_events,
.nb_event_queue_flows = opt->nb_flows,
.nb_event_port_dequeue_depth =

@ -19,7 +19,7 @@ order_atq_process_stage_0(struct rte_event *const ev)
}
static int
order_atq_worker(void *arg)
order_atq_worker(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev;
@ -34,6 +34,9 @@ order_atq_worker(void *arg)
continue;
}
if (!flow_id_cap)
ev.flow_id = ev.mbuf->udata64;
if (ev.sub_event_type == 0) { /* stage 0 from producer */
order_atq_process_stage_0(&ev);
while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@ -50,7 +53,7 @@ order_atq_worker(void *arg)
}
static int
order_atq_worker_burst(void *arg)
order_atq_worker_burst(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev[BURST_SIZE];
@ -68,6 +71,9 @@ order_atq_worker_burst(void *arg)
}
for (i = 0; i < nb_rx; i++) {
if (!flow_id_cap)
ev[i].flow_id = ev[i].mbuf->udata64;
if (ev[i].sub_event_type == 0) { /*stage 0 */
order_atq_process_stage_0(&ev[i]);
} else if (ev[i].sub_event_type == 1) { /* stage 1 */
@ -95,11 +101,19 @@ worker_wrapper(void *arg)
{
struct worker_data *w = arg;
const bool burst = evt_has_burst_mode(w->dev_id);
const bool flow_id_cap = evt_has_flow_id(w->dev_id);
if (burst)
return order_atq_worker_burst(arg);
else
return order_atq_worker(arg);
if (burst) {
if (flow_id_cap)
return order_atq_worker_burst(arg, true);
else
return order_atq_worker_burst(arg, false);
} else {
if (flow_id_cap)
return order_atq_worker(arg, true);
else
return order_atq_worker(arg, false);
}
}
static int

@ -49,6 +49,7 @@ order_producer(void *arg)
const uint32_t flow = (uintptr_t)m % nb_flows;
/* Maintain seq number per flow */
m->seqn = producer_flow_seq[flow]++;
m->udata64 = flow;
ev.flow_id = flow;
ev.mbuf = m;

@ -19,7 +19,7 @@ order_queue_process_stage_0(struct rte_event *const ev)
}
static int
order_queue_worker(void *arg)
order_queue_worker(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev;
@ -34,6 +34,9 @@ order_queue_worker(void *arg)
continue;
}
if (!flow_id_cap)
ev.flow_id = ev.mbuf->udata64;
if (ev.queue_id == 0) { /* from ordered queue */
order_queue_process_stage_0(&ev);
while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@ -50,7 +53,7 @@ order_queue_worker(void *arg)
}
static int
order_queue_worker_burst(void *arg)
order_queue_worker_burst(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev[BURST_SIZE];
@ -68,6 +71,10 @@ order_queue_worker_burst(void *arg)
}
for (i = 0; i < nb_rx; i++) {
if (!flow_id_cap)
ev[i].flow_id = ev[i].mbuf->udata64;
if (ev[i].queue_id == 0) { /* from ordered queue */
order_queue_process_stage_0(&ev[i]);
} else if (ev[i].queue_id == 1) {/* from atomic queue */
@ -95,11 +102,19 @@ worker_wrapper(void *arg)
{
struct worker_data *w = arg;
const bool burst = evt_has_burst_mode(w->dev_id);
const bool flow_id_cap = evt_has_flow_id(w->dev_id);
if (burst)
return order_queue_worker_burst(arg);
else
return order_queue_worker(arg);
if (burst) {
if (flow_id_cap)
return order_queue_worker_burst(arg, true);
else
return order_queue_worker_burst(arg, false);
} else {
if (flow_id_cap)
return order_queue_worker(arg, true);
else
return order_queue_worker(arg, false);
}
}
static int

@ -559,10 +559,10 @@ test_eventdev_port_setup(void)
if (!(info.event_dev_cap &
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
pconf.enqueue_depth = info.max_event_port_enqueue_depth;
pconf.disable_implicit_release = 1;
pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
pconf.disable_implicit_release = 0;
pconf.event_port_cfg = 0;
}
ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,

@ -177,19 +177,6 @@ Deprecation Notices
to one it means it represents IV, when is set to zero it means J0 is used
directly, in this case 16 bytes of J0 need to be passed.
* eventdev: Following structures will be modified to support DLB PMD
and future extensions:
- ``rte_event_dev_info``
- ``rte_event_dev_config``
- ``rte_event_port_conf``
Patches containing justification, documentation, and proposed modifications
can be found at:
- https://patches.dpdk.org/patch/71457/
- https://patches.dpdk.org/patch/71456/
* sched: To allow more traffic classes, flexible mapping of pipe queues to
traffic classes, and subport level configuration of pipes and queues
changes will be made to macros, data structures and API functions defined

@ -474,6 +474,13 @@ ABI Changes
* ``ethdev`` internal functions are marked with ``__rte_internal`` tag.
* eventdev: Following structures are modified to support DLB/DLB2 PMDs
and future extensions:
* ``rte_event_dev_info``
* ``rte_event_dev_config``
* ``rte_event_port_conf``
* sched: Added new fields to ``struct rte_sched_subport_port_params``.

@ -355,7 +355,8 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
RTE_EVENT_DEV_CAP_NONSEQ_MODE;
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
static int

@ -406,7 +406,8 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
@ -536,7 +537,7 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
port_conf->enqueue_depth =
DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
port_conf->disable_implicit_release = 0;
port_conf->event_port_cfg = 0;
}
static int

@ -224,7 +224,8 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
RTE_EVENT_DEV_CAP_NONSEQ_MODE|
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
};
}

@ -152,7 +152,8 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
RTE_EVENT_DEV_CAP_NONSEQ_MODE;
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
@ -218,7 +219,7 @@ ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
port_conf->new_event_threshold = edev->max_num_events;
port_conf->dequeue_depth = 1;
port_conf->enqueue_depth = 1;
port_conf->disable_implicit_release = 0;
port_conf->event_port_cfg = 0;
}
static void

@ -501,7 +501,8 @@ otx2_sso_info_get(struct rte_eventdev *event_dev,
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
RTE_EVENT_DEV_CAP_NONSEQ_MODE;
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
static void

@ -374,7 +374,8 @@ opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
};
*info = evdev_opdl_info;

@ -101,7 +101,8 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
dev_info->max_num_events = (1ULL << 20);
dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS;
RTE_EVENT_DEV_CAP_EVENT_QOS |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
static int
@ -209,7 +210,7 @@ skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
port_conf->new_event_threshold = 32 * 1024;
port_conf->dequeue_depth = 16;
port_conf->enqueue_depth = 16;
port_conf->disable_implicit_release = 0;
port_conf->event_port_cfg = 0;
}
static void

@ -179,7 +179,8 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
}
p->inflight_max = conf->new_event_threshold;
p->implicit_release = !conf->disable_implicit_release;
p->implicit_release = !(conf->event_port_cfg &
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
/* check if ring exists, same as rx_worker above */
snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@ -501,7 +502,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
port_conf->new_event_threshold = 1024;
port_conf->dequeue_depth = 16;
port_conf->enqueue_depth = 16;
port_conf->disable_implicit_release = 0;
port_conf->event_port_cfg = 0;
}
static int
@ -608,7 +609,8 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
RTE_EVENT_DEV_CAP_NONSEQ_MODE),
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
};
*info = evdev_sw_info;

@ -172,7 +172,6 @@ create_ports(struct test *t, int num_ports)
.new_event_threshold = 1024,
.dequeue_depth = 32,
.enqueue_depth = 64,
.disable_implicit_release = 0,
};
if (num_ports > MAX_PORTS)
return -1;
@ -1227,7 +1226,6 @@ port_reconfig_credits(struct test *t)
.new_event_threshold = 128,
.dequeue_depth = 32,
.enqueue_depth = 64,
.disable_implicit_release = 0,
};
if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
printf("%d Error setting up port\n", __LINE__);
@ -1317,7 +1315,6 @@ port_single_lb_reconfig(struct test *t)
.new_event_threshold = 128,
.dequeue_depth = 32,
.enqueue_depth = 64,
.disable_implicit_release = 0,
};
if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
printf("%d Error setting up port\n", __LINE__);
@ -3079,7 +3076,8 @@ worker_loopback(struct test *t, uint8_t disable_implicit_release)
* only be initialized once - and this needs to be set for multiple runs
*/
conf.new_event_threshold = 512;
conf.disable_implicit_release = disable_implicit_release;
conf.event_port_cfg = disable_implicit_release ?
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
if (rte_event_port_setup(evdev, 0, &conf) < 0) {
printf("Error setting up RX port\n");

@ -129,6 +129,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_single_link_event_port_queues = 1,
.nb_events_limit = 4096,
.nb_event_queue_flows = 1024,
.nb_event_port_dequeue_depth = 128,
@ -143,7 +144,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
.schedule_type = cdata.queue_type,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
.nb_atomic_order_sequences = 1024,
};
struct rte_event_queue_conf tx_q_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
@ -167,7 +168,8 @@ setup_eventdev_generic(struct worker_data *worker_data)
disable_implicit_release = (dev_info.event_dev_cap &
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
wkr_p_conf.disable_implicit_release = disable_implicit_release;
wkr_p_conf.event_port_cfg = disable_implicit_release ?
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
if (dev_info.max_num_events < config.nb_events_limit)
config.nb_events_limit = dev_info.max_num_events;

@ -436,6 +436,7 @@ setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_single_link_event_port_queues = 0,
.nb_events_limit = 4096,
.nb_event_queue_flows = 1024,
.nb_event_port_dequeue_depth = 128,

@ -126,8 +126,11 @@ l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
event_p_conf.disable_implicit_release =
evt_rsrc->disable_implicit_release;
event_p_conf.event_port_cfg = 0;
if (evt_rsrc->disable_implicit_release)
event_p_conf.event_port_cfg |=
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;

@ -123,8 +123,10 @@ l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
event_p_conf.disable_implicit_release =
evt_rsrc->disable_implicit_release;
event_p_conf.event_port_cfg = 0;
if (evt_rsrc->disable_implicit_release)
event_p_conf.event_port_cfg |=
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
event_p_id++) {

@ -115,8 +115,11 @@ l3fwd_event_port_setup_generic(void)
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
event_p_conf.disable_implicit_release =
evt_rsrc->disable_implicit_release;
event_p_conf.event_port_cfg = 0;
if (evt_rsrc->disable_implicit_release)
event_p_conf.event_port_cfg |=
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;

@ -113,8 +113,10 @@ l3fwd_event_port_setup_internal_port(void)
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
event_p_conf.disable_implicit_release =
evt_rsrc->disable_implicit_release;
event_p_conf.event_port_cfg = 0;
if (evt_rsrc->disable_implicit_release)
event_p_conf.event_port_cfg |=
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
event_p_id++) {

@ -286,7 +286,7 @@ txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
return ret;
}
pc->disable_implicit_release = 0;
pc->event_port_cfg = 0;
ret = rte_event_port_setup(dev_id, port_id, pc);
if (ret) {
RTE_EDEV_LOG_ERR("failed to setup event port %u\n",

@ -438,9 +438,29 @@ rte_event_dev_configure(uint8_t dev_id,
dev_id);
return -EINVAL;
}
if (dev_conf->nb_event_queues > info.max_event_queues) {
RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
dev_id, dev_conf->nb_event_queues, info.max_event_queues);
if (dev_conf->nb_event_queues > info.max_event_queues +
info.max_single_link_event_port_queue_pairs) {
RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
dev_id, dev_conf->nb_event_queues,
info.max_event_queues,
info.max_single_link_event_port_queue_pairs);
return -EINVAL;
}
if (dev_conf->nb_event_queues -
dev_conf->nb_single_link_event_port_queues >
info.max_event_queues) {
RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
dev_id, dev_conf->nb_event_queues,
dev_conf->nb_single_link_event_port_queues,
info.max_event_queues);
return -EINVAL;
}
if (dev_conf->nb_single_link_event_port_queues >
dev_conf->nb_event_queues) {
RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
dev_id,
dev_conf->nb_single_link_event_port_queues,
dev_conf->nb_event_queues);
return -EINVAL;
}
@ -449,9 +469,31 @@ rte_event_dev_configure(uint8_t dev_id,
RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
return -EINVAL;
}
if (dev_conf->nb_event_ports > info.max_event_ports) {
RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
dev_id, dev_conf->nb_event_ports, info.max_event_ports);
if (dev_conf->nb_event_ports > info.max_event_ports +
info.max_single_link_event_port_queue_pairs) {
RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
dev_id, dev_conf->nb_event_ports,
info.max_event_ports,
info.max_single_link_event_port_queue_pairs);
return -EINVAL;
}
if (dev_conf->nb_event_ports -
dev_conf->nb_single_link_event_port_queues
> info.max_event_ports) {
RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
dev_id, dev_conf->nb_event_ports,
dev_conf->nb_single_link_event_port_queues,
info.max_event_ports);
return -EINVAL;
}
if (dev_conf->nb_single_link_event_port_queues >
dev_conf->nb_event_ports) {
RTE_EDEV_LOG_ERR(
"dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
dev_id,
dev_conf->nb_single_link_event_port_queues,
dev_conf->nb_event_ports);
return -EINVAL;
}
@ -738,7 +780,8 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
return -EINVAL;
}
if (port_conf && port_conf->disable_implicit_release &&
if (port_conf &&
(port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
!(dev->data->event_dev_cap &
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
RTE_EDEV_LOG_ERR(
@ -831,6 +874,14 @@ rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
break;
case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
{
uint32_t config;
config = dev->data->ports_cfg[port_id].event_port_cfg;
*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
break;
}
default:
return -EINVAL;
};

@ -291,6 +291,12 @@ struct rte_event;
* single queue to each port or map a single queue to many port.
*/
#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
/**< Event device preserves the flow ID from the enqueued
* event to the dequeued event if the flag is set. Otherwise,
* the content of this field is implementation dependent.
*/
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/**< Highest priority expressed across eventdev subsystem
@ -380,6 +386,10 @@ struct rte_event_dev_info {
* event port by this device.
* A device that does not support bulk enqueue will set this as 1.
*/
uint8_t max_event_port_links;
/**< Maximum number of queues that can be linked to a single event
* port by this device.
*/
int32_t max_num_events;
/**< A *closed system* event dev has a limit on the number of events it
* can manage at a time. An *open system* event dev does not have a
@ -387,6 +397,12 @@ struct rte_event_dev_info {
*/
uint32_t event_dev_cap;
/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
uint8_t max_single_link_event_port_queue_pairs;
/**< Maximum number of event ports and queues that are optimized for
* (and only capable of) single-link configurations supported by this
* device. These ports and queues are not accounted for in
* max_event_ports or max_event_queues.
*/
};
/**
@ -494,6 +510,14 @@ struct rte_event_dev_config {
*/
uint32_t event_dev_cfg;
/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
uint8_t nb_single_link_event_port_queues;
/**< Number of event ports and queues that will be singly-linked to
* each other. These are a subset of the overall event ports and
* queues; this value cannot exceed *nb_event_ports* or
* *nb_event_queues*. If the device has ports and queues that are
* optimized for single-link usage, this field is a hint for how many
* to allocate; otherwise, regular event ports and queues can be used.
*/
};
/**
@ -519,7 +543,6 @@ int
rte_event_dev_configure(uint8_t dev_id,
const struct rte_event_dev_config *dev_conf);
/* Event queue specific APIs */
/* Event queue configuration bitmap flags */
@ -671,6 +694,20 @@ rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
/* Event port specific APIs */
/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
/**< Configure the port not to release outstanding events in
* rte_event_dev_dequeue_burst(). If set, all events received through
* the port must be explicitly released with RTE_EVENT_OP_RELEASE or
* RTE_EVENT_OP_FORWARD. Must be unset if the device is not
* RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
*/
#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
/**< This event port links only to a single event queue.
*
* @see rte_event_port_setup(), rte_event_port_link()
*/
/** Event port configuration structure */
struct rte_event_port_conf {
int32_t new_event_threshold;
@ -698,13 +735,7 @@ struct rte_event_port_conf {
* which previously supplied to rte_event_dev_configure().
* Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
*/
uint8_t disable_implicit_release;
/**< Configure the port not to release outstanding events in
* rte_event_dev_dequeue_burst(). If true, all events received through
* the port must be explicitly released with RTE_EVENT_OP_RELEASE or
* RTE_EVENT_OP_FORWARD. Must be false when the device is not
* RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
*/
uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
};
/**
@ -769,6 +800,10 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
* The new event threshold of the port
*/
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/**
* The implicit release disable attribute of the port
*/
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
/**
* Get an attribute from a port.

@ -88,7 +88,6 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
return -ENXIO;
}
/**
* @internal
* Wrapper for use by pci drivers as a .remove function to detach a event

@ -34,6 +34,7 @@ RTE_TRACE_POINT(
rte_trace_point_emit_u32(dev_conf->nb_event_port_dequeue_depth);
rte_trace_point_emit_u32(dev_conf->nb_event_port_enqueue_depth);
rte_trace_point_emit_u32(dev_conf->event_dev_cfg);
rte_trace_point_emit_u8(dev_conf->nb_single_link_event_port_queues);
rte_trace_point_emit_int(rc);
)
@ -59,7 +60,7 @@ RTE_TRACE_POINT(
rte_trace_point_emit_i32(port_conf->new_event_threshold);
rte_trace_point_emit_u16(port_conf->dequeue_depth);
rte_trace_point_emit_u16(port_conf->enqueue_depth);
rte_trace_point_emit_u8(port_conf->disable_implicit_release);
rte_trace_point_emit_u32(port_conf->event_port_cfg);
rte_trace_point_emit_int(rc);
)
@ -165,7 +166,7 @@ RTE_TRACE_POINT(
rte_trace_point_emit_i32(port_conf->new_event_threshold);
rte_trace_point_emit_u16(port_conf->dequeue_depth);
rte_trace_point_emit_u16(port_conf->enqueue_depth);
rte_trace_point_emit_u8(port_conf->disable_implicit_release);
rte_trace_point_emit_u32(port_conf->event_port_cfg);
rte_trace_point_emit_ptr(conf_cb);
rte_trace_point_emit_int(rc);
)
@ -257,7 +258,7 @@ RTE_TRACE_POINT(
rte_trace_point_emit_i32(port_conf->new_event_threshold);
rte_trace_point_emit_u16(port_conf->dequeue_depth);
rte_trace_point_emit_u16(port_conf->enqueue_depth);
rte_trace_point_emit_u8(port_conf->disable_implicit_release);
rte_trace_point_emit_u32(port_conf->event_port_cfg);
)
RTE_TRACE_POINT(

@ -100,7 +100,6 @@ EXPERIMENTAL {
# added in 20.05
__rte_eventdev_trace_configure;
__rte_eventdev_trace_queue_setup;
__rte_eventdev_trace_port_setup;
__rte_eventdev_trace_port_link;
__rte_eventdev_trace_port_unlink;
__rte_eventdev_trace_start;
@ -134,4 +133,7 @@ EXPERIMENTAL {
__rte_eventdev_trace_crypto_adapter_queue_pair_del;
__rte_eventdev_trace_crypto_adapter_start;
__rte_eventdev_trace_crypto_adapter_stop;
# changed in 20.11
__rte_eventdev_trace_port_setup;
};