eventdev: negate maintenance capability flag

Replace RTE_EVENT_DEV_CAP_REQUIRES_MAINT, which signaled the need
for the application to call rte_event_maintain(), with
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, which does the opposite (i.e.,
signifies that the event device does not require maintenance).

This approach is more in line with how other eventdev hardware and/or
software limitations are handled in the Eventdev API.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>

@@ -44,8 +44,8 @@ Port Maintenance
 ~~~~~~~~~~~~~~~~
 
 The distributed software eventdev uses an internal signaling scheme
-between the ports to achieve load balancing. Therefore, it sets the
-``RTE_EVENT_DEV_CAP_REQUIRES_MAINT`` flag.
+between the ports to achieve load balancing. Therefore, it does not
+set the ``RTE_EVENT_DEV_CAP_MAINTENANCE_FREE`` flag.
 
 During periods when the application thread using a particular port is
 neither attempting to enqueue nor to dequeue events, it must

@@ -119,7 +119,8 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 	RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 	RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 	RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 int

@@ -66,7 +66,8 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
 	RTE_EVENT_DEV_CAP_BURST_MODE |
 	RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
 	RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
-	RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
+	RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
 };
 
 struct process_local_port_data

@@ -356,7 +356,8 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
 	RTE_EVENT_DEV_CAP_BURST_MODE |
 	RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 	RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 static int

@@ -408,7 +408,8 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 	RTE_EVENT_DEV_CAP_NONSEQ_MODE |
 	RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }

@@ -222,8 +222,7 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
 	RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
 	RTE_EVENT_DEV_CAP_NONSEQ_MODE|
 	RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID|
-	RTE_EVENT_DEV_CAP_REQUIRES_MAINT
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
 	};
 }

@@ -155,7 +155,8 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
 	RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 	RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 	RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }

@@ -505,7 +505,8 @@ otx2_sso_info_get(struct rte_eventdev *event_dev,
 	RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 	RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 	RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 static void

@@ -375,7 +375,8 @@ opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 	.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
 	.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
 	.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE,
 	};
 
 	*info = evdev_opdl_info;

@@ -102,7 +102,8 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
 	RTE_EVENT_DEV_CAP_BURST_MODE |
 	RTE_EVENT_DEV_CAP_EVENT_QOS |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 static int

@@ -609,7 +609,8 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 	RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 	RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 	RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
+	RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+	RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
 	};
 
 	*info = evdev_sw_info;

@@ -299,13 +299,14 @@ struct rte_event;
  * the content of this field is implementation dependent.
  */
 
-#define RTE_EVENT_DEV_CAP_REQUIRES_MAINT (1ULL << 10)
-/**< Event device requires calls to rte_event_maintain() during
- * periods when neither rte_event_dequeue_burst() nor
- * rte_event_enqueue_burst() are called on a port. This will allow the
- * event device to perform internal processing, such as flushing
- * buffered events, return credits to a global pool, or process
- * signaling related to load balancing.
+#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
+/**< Event device *does not* require calls to rte_event_maintain().
+ * An event device that does not set this flag requires calls to
+ * rte_event_maintain() during periods when neither
+ * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
+ * on a port. This will allow the event device to perform internal
+ * processing, such as flushing buffered events, return credits to a
+ * global pool, or process signaling related to load balancing.
  */
 
 /* Event device priority levels */
@@ -2082,8 +2083,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 /**
  * Maintain an event device.
  *
- * This function is only relevant for event devices which have the
- * @ref RTE_EVENT_DEV_CAP_REQUIRES_MAINT flag set. Such devices
+ * This function is only relevant for event devices which do not have
+ * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
 * require an application thread using a particular port to
 * periodically call rte_event_maintain() on that port during periods
 * which it is neither attempting to enqueue events to nor dequeue
@@ -2098,9 +2099,9 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 * or dequeue functions are being called, at the cost of a slight
 * increase in overhead.
 *
- * rte_event_maintain() may be called on event devices which haven't
- * set @ref RTE_EVENT_DEV_CAP_REQUIRES_MAINT flag, in which case it is
- * a no-operation.
+ * rte_event_maintain() may be called on event devices which have set
+ * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
+ * no-operation.
 *
 * @param dev_id
 *   The identifier of the device.
@@ -2112,7 +2113,7 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 * - 0 on success.
 * - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
 *
- * @see RTE_EVENT_DEV_CAP_REQUIRES_MAINT
+ * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
 */
 __rte_experimental
 static inline int
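
For context, a worker loop on a device lacking
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE could keep its port serviced as
sketched below (illustrative only; worker_poll and the burst size of 32
are assumptions, and op value 0 requests ordinary maintenance):

    #include <stdbool.h>
    #include <rte_eventdev.h>

    static void
    worker_poll(uint8_t dev_id, uint8_t port_id, bool maintenance_free)
    {
            struct rte_event ev[32];

            for (;;) {
                    uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
                                                         ev, 32, 0);

                    if (n == 0) {
                            /* Idle: give a non-maintenance-free device
                             * a chance to do internal processing. */
                            if (!maintenance_free)
                                    (void)rte_event_maintain(dev_id,
                                                             port_id, 0);
                            continue;
                    }

                    /* ... process and forward the n dequeued events ... */
            }
    }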