event/dsw: avoid credit leak on oversized enqueue bursts

If an application issues rte_event_enqueue_new_burst() or
rte_event_enqueue_forward_burst() call with a burst of events longer
than the configured max enqueue burst size, DSW allocates credits not
only for events actually enqueued, but for the complete burst. If this
process is repeated, enough credits will have leaked to cause the
event device to backpressure (i.e. disallow) any new enqueue
operations.

In addition, the port-level enqueue xstats will log the wrong number
of events enqueued for oversized enqueues.

This patch makes DSW gracefully handle oversized enqueue bursts.

Fixes: 1c8e3caa3bfb ("event/dsw: add event scheduling and device start/stop")
Cc: stable@dpdk.org

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
This commit is contained in:
Mattias Rönnblom 2020-01-14 19:03:38 +01:00 committed by Jerin Jacob
parent 345a22d5ec
commit 0c4155c7b5

View File

@@ -1018,12 +1018,12 @@ dsw_event_enqueue(void *port, const struct rte_event *ev)
} }
static __rte_always_inline uint16_t static __rte_always_inline uint16_t
dsw_event_enqueue_burst_generic(void *port, const struct rte_event events[], dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
const struct rte_event events[],
uint16_t events_len, bool op_types_known, uint16_t events_len, bool op_types_known,
uint16_t num_new, uint16_t num_release, uint16_t num_new, uint16_t num_release,
uint16_t num_non_release) uint16_t num_non_release)
{ {
struct dsw_port *source_port = port;
struct dsw_evdev *dsw = source_port->dsw; struct dsw_evdev *dsw = source_port->dsw;
bool enough_credits; bool enough_credits;
uint16_t i; uint16_t i;
@@ -1047,13 +1047,10 @@ dsw_event_enqueue_burst_generic(void *port, const struct rte_event events[],
*/ */
if (unlikely(events_len == 0)) { if (unlikely(events_len == 0)) {
dsw_port_note_op(source_port, DSW_MAX_PORT_OPS_PER_BG_TASK); dsw_port_note_op(source_port, DSW_MAX_PORT_OPS_PER_BG_TASK);
dsw_port_flush_out_buffers(dsw, port); dsw_port_flush_out_buffers(dsw, source_port);
return 0; return 0;
} }
if (unlikely(events_len > source_port->enqueue_depth))
events_len = source_port->enqueue_depth;
dsw_port_note_op(source_port, events_len); dsw_port_note_op(source_port, events_len);
if (!op_types_known) if (!op_types_known)
@@ -1109,24 +1106,41 @@ uint16_t
dsw_event_enqueue_burst(void *port, const struct rte_event events[], dsw_event_enqueue_burst(void *port, const struct rte_event events[],
uint16_t events_len) uint16_t events_len)
{ {
return dsw_event_enqueue_burst_generic(port, events, events_len, false, struct dsw_port *source_port = port;
0, 0, 0);
if (unlikely(events_len > source_port->enqueue_depth))
events_len = source_port->enqueue_depth;
return dsw_event_enqueue_burst_generic(source_port, events,
events_len, false, 0, 0, 0);
} }
uint16_t uint16_t
dsw_event_enqueue_new_burst(void *port, const struct rte_event events[], dsw_event_enqueue_new_burst(void *port, const struct rte_event events[],
uint16_t events_len) uint16_t events_len)
{ {
return dsw_event_enqueue_burst_generic(port, events, events_len, true, struct dsw_port *source_port = port;
events_len, 0, events_len);
if (unlikely(events_len > source_port->enqueue_depth))
events_len = source_port->enqueue_depth;
return dsw_event_enqueue_burst_generic(source_port, events,
events_len, true, events_len,
0, events_len);
} }
uint16_t uint16_t
dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[], dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[],
uint16_t events_len) uint16_t events_len)
{ {
return dsw_event_enqueue_burst_generic(port, events, events_len, true, struct dsw_port *source_port = port;
0, 0, events_len);
if (unlikely(events_len > source_port->enqueue_depth))
events_len = source_port->enqueue_depth;
return dsw_event_enqueue_burst_generic(source_port, events,
events_len, true, 0, 0,
events_len);
} }
uint16_t uint16_t