/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_event_ring.h>

#include "sw_evdev.h"

#define PORT_ENQUEUE_MAX_BURST_SIZE 64
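
/*
 * Queue a RELEASE marker to the scheduling logic for one event that was
 * dequeued earlier but never forwarded or released by the application, and
 * take the credit back locally. Used by the dequeue path below when the port
 * runs with implicit releases.
 */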
static inline void
sw_event_release(struct sw_port *p, uint8_t index)
{
	/*
	 * Drops the next outstanding event in our history. Used on dequeue
	 * to clear any history before dequeuing more events.
	 */
	RTE_SET_USED(index);

	/* create drop message */
	struct rte_event ev;
	ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];

	uint16_t free_count;
	rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);

	/* each release returns one credit */
	p->outstanding_releases--;
	p->inflight_credits++;
}

/*
 * Special case of rte_event_ring enqueue, overriding the op member on
 * the events that get written to the ring.
 */
static inline unsigned int
enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
		unsigned int n, uint8_t *ops)
{
	struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
	unsigned int i;

	memcpy(tmp_evs, events, n * sizeof(events[0]));
	for (i = 0; i < n; i++)
		tmp_evs[i].op = ops[i];

	return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
}
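
/*
 * Port credit accounting, summarising the logic below: a NEW event consumes
 * one of the port's inflight_credits and a RELEASE returns one. When a port
 * runs short it takes credit_update_quanta credits from the device-wide
 * sw->inflights counter, and once it has accumulated two quanta of spare
 * credits it hands one quantum back, keeping most accounting port-local.
 */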
uint16_t
sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
{
	int32_t i;
	uint8_t new_ops[PORT_ENQUEUE_MAX_BURST_SIZE];
	struct sw_port *p = port;
	struct sw_evdev *sw = (void *)p->sw;
	uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
	uint32_t credit_update_quanta = sw->credit_update_quanta;
	int new = 0;

	if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
		num = PORT_ENQUEUE_MAX_BURST_SIZE;

	for (i = 0; i < num; i++)
		new += (ev[i].op == RTE_EVENT_OP_NEW);
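
	/*
	 * NEW-event admission control: if the device-wide inflight count is
	 * already above this port's limit (p->inflight_max, the port's
	 * new-event threshold), refuse the whole burst rather than inject
	 * more new events.
	 */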
	if (unlikely(new > 0 && p->inflight_max < sw_inflights))
		return 0;

	if (p->inflight_credits < new) {
		/* check if event enqueue brings port over max threshold */
		if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
			return 0;

		rte_atomic32_add(&sw->inflights, credit_update_quanta);
		p->inflight_credits += credit_update_quanta;

		/* If there are fewer inflight credits than new events, limit
		 * the number of enqueued events.
		 */
		num = (p->inflight_credits < new) ? p->inflight_credits : num;
	}
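
	/*
	 * Translate each event's op into scheduler QE flags, updating this
	 * port's credits as we go: NEW consumes a credit, RELEASE returns one
	 * (only when an earlier dequeue is actually outstanding). Events
	 * addressed to an invalid queue get their QE_FLAG_VALID bit cleared
	 * and are counted as rx_dropped.
	 */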
	for (i = 0; i < num; i++) {
		int op = ev[i].op;
		int outstanding = p->outstanding_releases > 0;
		const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);

		p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
		p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
					outstanding;

		new_ops[i] = sw_qe_flag_map[op];
		new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);

		/* FWD and RELEASE packets will both resolve to taken (assuming
		 * correct usage of the API), providing very high correct
		 * prediction rate.
		 */
		if ((new_ops[i] & QE_FLAG_COMPLETE) && outstanding)
			p->outstanding_releases--;

		/* error case: branch to avoid touching p->stats */
		if (unlikely(invalid_qid && op != RTE_EVENT_OP_RELEASE)) {
			p->stats.rx_dropped++;
			p->inflight_credits++;
		}
	}

	/* returns number of events actually enqueued */
	uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
					     new_ops);
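	/*
	 * If everything dequeued earlier has now been released, fold the time
	 * spent on that burst into avg_pkt_ticks as an exponentially weighted
	 * moving average with weight 1/NUM_SAMPLES:
	 * avg += (burst_ticks / burst_size - avg) / NUM_SAMPLES.
	 */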
	if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
		uint64_t burst_ticks = rte_get_timer_cycles() -
				p->last_dequeue_ticks;
		uint64_t burst_pkt_ticks =
			burst_ticks / p->last_dequeue_burst_sz;
		p->avg_pkt_ticks -= p->avg_pkt_ticks / NUM_SAMPLES;
		p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
		p->last_dequeue_ticks = 0;
	}
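
	/*
	 * Surplus credits go back to the device one quantum at a time, and
	 * only once the port holds at least two quanta, which limits how
	 * often the shared sw->inflights counter is touched.
	 */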
	/* Replenish credits if enough releases are performed */
	if (p->inflight_credits >= credit_update_quanta * 2) {
		rte_atomic32_sub(&sw->inflights, credit_update_quanta);
		p->inflight_credits -= credit_update_quanta;
	}

	return enq;
}

uint16_t
sw_event_enqueue(void *port, const struct rte_event *ev)
{
	return sw_event_enqueue_burst(port, ev, 1);
}

uint16_t
sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
		uint64_t wait)
{
	RTE_SET_USED(wait);
	struct sw_port *p = (void *)port;
	struct rte_event_ring *ring = p->cq_worker_ring;
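
	/*
	 * If the port was configured with implicit releases (the default),
	 * any events still outstanding from the previous dequeue are
	 * released on the application's behalf before handing out new ones.
	 */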
	/* check that all previous dequeues have been released */
	if (p->implicit_release) {
		struct sw_evdev *sw = (void *)p->sw;
		uint32_t credit_update_quanta = sw->credit_update_quanta;
		uint16_t out_rels = p->outstanding_releases;
		uint16_t i;
		for (i = 0; i < out_rels; i++)
			sw_event_release(p, i);

		/* Replenish credits if enough releases are performed */
		if (p->inflight_credits >= credit_update_quanta * 2) {
			rte_atomic32_sub(&sw->inflights, credit_update_quanta);
			p->inflight_credits -= credit_update_quanta;
		}
	}

	/* returns number of events actually dequeued */
	uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
	if (unlikely(ndeq == 0)) {
		p->zero_polls++;
		p->total_polls++;
		goto end;
	}
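
	/* Every dequeued event must eventually be released, either explicitly
	 * by the application or implicitly on the next dequeue, so remember
	 * how many are now outstanding.
	 */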
	p->outstanding_releases += ndeq;
	p->last_dequeue_burst_sz = ndeq;
	p->last_dequeue_ticks = rte_get_timer_cycles();
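	/* Bucketed histogram of dequeue burst sizes, kept for the driver's
	 * statistics reporting.
	 */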
	p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
	p->total_polls++;

end:
	return ndeq;
}

uint16_t
sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)
{
	return sw_event_dequeue_burst(port, ev, 1, wait);
}
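
/*
 * Illustrative sketch (not part of the driver): these handlers are reached
 * through the public eventdev API. A worker thread on this PMD would
 * typically look like the loop below; dev_id and port_id are assumed to have
 * been set up by the application, and process() stands in for application
 * work on each event.
 *
 *	struct rte_event evs[32];
 *	while (!done) {
 *		uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
 *				evs, RTE_DIM(evs), 0);
 *		for (uint16_t i = 0; i < n; i++) {
 *			process(&evs[i]);
 *			evs[i].op = RTE_EVENT_OP_FORWARD;
 *		}
 *		uint16_t sent = 0;
 *		while (sent < n)
 *			sent += rte_event_enqueue_burst(dev_id, port_id,
 *					evs + sent, n - sent);
 *	}
 */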