event/sw: change worker rings to standard event rings
Now that we have a standard event ring implementation for passing events
core-to-core, use that in place of the custom event rings in the software
eventdev.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
Acked-by: Harry van Haaren <harry.van.haaren@intel.com>
parent 1ee55d7a6e
commit 86aed50aad
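For context before the diff: the sw PMD previously moved events between worker cores and the scheduling core through its private qe_ring; this patch replaces that with the library-provided rte_event_ring. Below is a minimal sketch of the replacement API's round trip, assuming only <rte_event_ring.h>; the ring name "demo", the depth of 128 and the single-producer/single-consumer flags are illustrative values, not taken from the patch.

#include <rte_event_ring.h>

/* Create a ring, pass one event through it, and clean up. */
static int
demo_event_ring_roundtrip(int socket_id)
{
	struct rte_event in = { .queue_id = 0 };
	struct rte_event out;
	struct rte_event_ring *r = rte_event_ring_create("demo", 128,
			socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);

	if (r == NULL)
		return -1;

	/* the final argument optionally reports free space (enqueue) or
	 * remaining events (dequeue); NULL skips that report */
	if (rte_event_ring_enqueue_burst(r, &in, 1, NULL) != 1 ||
			rte_event_ring_dequeue_burst(r, &out, 1, NULL) != 1) {
		rte_event_ring_free(r);
		return -1;
	}

	rte_event_ring_free(r);
	return 0;
}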
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -38,10 +38,10 @@
 #include <rte_kvargs.h>
 #include <rte_ring.h>
 #include <rte_errno.h>
+#include <rte_event_ring.h>

 #include "sw_evdev.h"
 #include "iq_ring.h"
-#include "event_ring.h"

 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
@@ -140,7 +140,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	struct sw_port *p = &sw->ports[port_id];
-	char buf[QE_RING_NAMESIZE];
+	char buf[RTE_RING_NAMESIZE];
 	unsigned int i;

 	struct rte_event_dev_info info;
@@ -161,10 +161,19 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	p->id = port_id;
 	p->sw = sw;

-	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
-			"rx_worker_ring");
-	p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
-			dev->data->socket_id);
+	/* check to see if ring exists - port_setup() can be called multiple
+	 * times legally (assuming device is stopped). If ring exists, free it
+	 * so it gets re-created with the correct size
+	 */
+	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+			port_id, "rx_worker_ring");
+	struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
+	if (existing_ring)
+		rte_event_ring_free(existing_ring);
+
+	p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+			dev->data->socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 	if (p->rx_worker_ring == NULL) {
 		SW_LOG_ERR("Error creating RX worker ring for port %d\n",
 				port_id);
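The lookup-and-free sequence above exists because port_setup() may legally run more than once on a stopped device; a stale ring from an earlier call must be freed so the new create honours the currently requested depth. A condensed sketch of the same pattern follows; the helper name recreate_ring is hypothetical, not part of the patch.

static struct rte_event_ring *
recreate_ring(const char *name, unsigned int depth, int socket_id)
{
	/* free any leftover ring of the same name from a prior setup */
	struct rte_event_ring *r = rte_event_ring_lookup(name);

	if (r != NULL)
		rte_event_ring_free(r);

	/* RING_F_EXACT_SZ gives a usable capacity of exactly "depth",
	 * rather than rounding the request up to a power of two */
	return rte_event_ring_create(name, depth, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
}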
@@ -173,12 +182,18 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,

 	p->inflight_max = conf->new_event_threshold;

-	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
-			"cq_worker_ring");
-	p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
-			dev->data->socket_id);
+	/* check if ring exists, same as rx_worker above */
+	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+			port_id, "cq_worker_ring");
+	existing_ring = rte_event_ring_lookup(buf);
+	if (existing_ring)
+		rte_event_ring_free(existing_ring);
+
+	p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+			dev->data->socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 	if (p->cq_worker_ring == NULL) {
-		qe_ring_destroy(p->rx_worker_ring);
+		rte_event_ring_free(p->rx_worker_ring);
 		SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
 				port_id);
 		return -1;
@@ -204,8 +219,8 @@ sw_port_release(void *port)
 	if (p == NULL)
 		return;

-	qe_ring_destroy(p->rx_worker_ring);
-	qe_ring_destroy(p->cq_worker_ring);
+	rte_event_ring_free(p->rx_worker_ring);
+	rte_event_ring_free(p->cq_worker_ring);
 	memset(p, 0, sizeof(*p));
 }
@@ -512,8 +527,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 		fprintf(f, "\n");

 		if (p->rx_worker_ring) {
-			uint64_t used = qe_ring_count(p->rx_worker_ring);
-			uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+			uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+			uint64_t space = rte_event_ring_free_count(
+					p->rx_worker_ring);
 			const char *col = (space == 0) ? COL_RED : COL_RESET;
 			fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
 					PRIu64 COL_RESET"\n", col, used, space);
@@ -521,8 +537,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 			fprintf(f, "\trx ring not initialized.\n");

 		if (p->cq_worker_ring) {
-			uint64_t used = qe_ring_count(p->cq_worker_ring);
-			uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+			uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+			uint64_t space = rte_event_ring_free_count(
+					p->cq_worker_ring);
 			const char *col = (space == 0) ? COL_RED : COL_RESET;
 			fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
 					PRIu64 COL_RESET"\n", col, used, space);
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -190,9 +190,9 @@ struct sw_port {
 	int16_t num_ordered_qids;

 	/** Ring and buffer for pulling events from workers for scheduling */
-	struct qe_ring *rx_worker_ring __rte_cache_aligned;
+	struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
 	/** Ring and buffer for pushing packets to workers after scheduling */
-	struct qe_ring *cq_worker_ring;
+	struct rte_event_ring *cq_worker_ring;

 	/* hole */
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -32,9 +32,9 @@

 #include <rte_ring.h>
 #include <rte_hash_crc.h>
+#include <rte_event_ring.h>
 #include "sw_evdev.h"
 #include "iq_ring.h"
-#include "event_ring.h"

 #define SW_IQS_MASK (SW_IQS_MAX-1)
@@ -123,8 +123,8 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,

 		/* if we just filled in the last slot, flush the buffer */
 		if (sw->cq_ring_space[cq] == 0) {
-			struct qe_ring *worker = p->cq_worker_ring;
-			qe_ring_enqueue_burst(worker, p->cq_buf,
+			struct rte_event_ring *worker = p->cq_worker_ring;
+			rte_event_ring_enqueue_burst(worker, p->cq_buf,
 					p->cq_buf_count,
 					&sw->cq_ring_space[cq]);
 			p->cq_buf_count = 0;
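Worth noting in the hunk above: the free-space out-parameter of rte_event_ring_enqueue_burst() refills sw->cq_ring_space[cq] in the same call, so the scheduler never issues a separate free-count query after a flush. A sketch of that pattern; flush_cq_buf is an illustrative helper, not PMD code.

static void
flush_cq_buf(struct rte_event_ring *worker, const struct rte_event *buf,
		uint16_t *count, uint16_t *cached_space)
{
	/* enqueue the buffered events; the ring reports its remaining
	 * free space straight into the scheduler's cached counter */
	rte_event_ring_enqueue_burst(worker, buf, *count, cached_space);
	*count = 0;
}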
@@ -171,7 +171,8 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
			cq = qid->cq_map[cq_idx];
			if (++cq_idx == qid->cq_num_mapped_cqs)
				cq_idx = 0;
-		} while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+		} while (rte_event_ring_free_count(
+				sw->ports[cq].cq_worker_ring) == 0 ||
				sw->ports[cq].inflights == SW_PORT_HIST_LIST);

		struct sw_port *p = &sw->ports[cq];
@@ -367,10 +368,10 @@ static __rte_always_inline void
 sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
 {
 	RTE_SET_USED(sw);
-	struct qe_ring *worker = port->rx_worker_ring;
+	struct rte_event_ring *worker = port->rx_worker_ring;
 	port->pp_buf_start = 0;
-	port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
-			RTE_DIM(port->pp_buf));
+	port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
+			RTE_DIM(port->pp_buf), NULL);
 }

 static __rte_always_inline uint32_t
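The hunk above also shows the signature difference on the dequeue side: rte_event_ring_dequeue_burst() takes a trailing out-parameter reporting how many events remain in the ring, and the PMD passes NULL because it does not need that count. A hedged sketch of actually using the parameter; drain_all is an illustrative helper, not part of the patch.

static unsigned int
drain_all(struct rte_event_ring *r, struct rte_event *evs, unsigned int max)
{
	uint16_t remaining;
	unsigned int n, total = 0;

	do {
		n = rte_event_ring_dequeue_burst(r, evs + total,
				max - total, &remaining);
		total += n;
		/* "remaining" holds the events still queued after this burst */
	} while (n != 0 && remaining != 0 && total < max);

	return total;
}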
@@ -586,8 +587,8 @@ sw_event_schedule(struct rte_eventdev *dev)
	 * worker cores: aka, do the ring transfers batched.
	 */
	for (i = 0; i < sw->port_count; i++) {
-		struct qe_ring *worker = sw->ports[i].cq_worker_ring;
-		qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+		struct rte_event_ring *worker = sw->ports[i].cq_worker_ring;
+		rte_event_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
				sw->ports[i].cq_buf_count,
				&sw->cq_ring_space[i]);
		sw->ports[i].cq_buf_count = 0;
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -32,9 +32,9 @@

 #include <rte_atomic.h>
 #include <rte_cycles.h>
+#include <rte_event_ring.h>

 #include "sw_evdev.h"
-#include "event_ring.h"

 #define PORT_ENQUEUE_MAX_BURST_SIZE 64
@@ -52,13 +52,31 @@ sw_event_release(struct sw_port *p, uint8_t index)
 	ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];

 	uint16_t free_count;
-	qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+	rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);

 	/* each release returns one credit */
 	p->outstanding_releases--;
 	p->inflight_credits++;
 }

+/*
+ * special-case of rte_event_ring enqueue, with overriding the ops member on
+ * the events that get written to the ring.
+ */
+static inline unsigned int
+enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
+		unsigned int n, uint8_t *ops)
+{
+	struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
+	unsigned int i;
+
+	memcpy(tmp_evs, events, n * sizeof(events[0]));
+	for (i = 0; i < n; i++)
+		tmp_evs[i].op = ops[i];
+
+	return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
+}
+
 uint16_t
 sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 {
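The enqueue_burst_with_ops() helper added above copies the caller's const burst into a stack buffer before stamping each event's op field, since the ring enqueue copies events verbatim and the source array may not be modified. An illustrative caller, with placeholder op values; in the PMD the real values come from sw_qe_flag_map[] indexed by each event's original op.

static unsigned int
send_with_ops(struct rte_event_ring *r, const struct rte_event *evs,
		unsigned int n)
{
	uint8_t ops[PORT_ENQUEUE_MAX_BURST_SIZE];
	unsigned int i;

	/* the helper's stack buffer bounds the burst size */
	if (n > PORT_ENQUEUE_MAX_BURST_SIZE)
		n = PORT_ENQUEUE_MAX_BURST_SIZE;
	for (i = 0; i < n; i++)
		ops[i] = RTE_EVENT_OP_NEW; /* placeholder op value */

	return enqueue_burst_with_ops(r, evs, n, ops);
}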
@@ -119,7 +137,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 	p->inflight_credits -= forwards * p->is_directed;

 	/* returns number of events actually enqueued */
-	uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+	uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
 			new_ops);
 	if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
 		uint64_t burst_ticks = rte_get_timer_cycles() -
@@ -146,7 +164,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
 	RTE_SET_USED(wait);
 	struct sw_port *p = (void *)port;
 	struct sw_evdev *sw = (void *)p->sw;
-	struct qe_ring *ring = p->cq_worker_ring;
+	struct rte_event_ring *ring = p->cq_worker_ring;
 	uint32_t credit_update_quanta = sw->credit_update_quanta;

 	/* check that all previous dequeues have been released */
@@ -158,7 +176,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
 	}

 	/* returns number of events actually dequeued */
-	uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
+	uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
 	if (unlikely(ndeq == 0)) {
 		p->outstanding_releases = 0;
 		p->zero_polls++;
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -30,9 +30,9 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

+#include <rte_event_ring.h>
 #include "sw_evdev.h"
 #include "iq_ring.h"
-#include "event_ring.h"

 enum xstats_type {
 	/* common stats */
@@ -105,10 +105,10 @@ get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
 	case calls: return p->total_polls;
 	case credits: return p->inflight_credits;
 	case poll_return: return p->zero_polls;
-	case rx_used: return qe_ring_count(p->rx_worker_ring);
-	case rx_free: return qe_ring_free_count(p->rx_worker_ring);
-	case tx_used: return qe_ring_count(p->cq_worker_ring);
-	case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+	case rx_used: return rte_event_ring_count(p->rx_worker_ring);
+	case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
+	case tx_used: return rte_event_ring_count(p->cq_worker_ring);
+	case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
 	default: return -1;
 	}
 }
@@ -318,8 +318,9 @@ sw_xstats_init(struct sw_evdev *sw)
				port, port_stats[i]);
	}

-	for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
-		SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+	for (bkt = 0; bkt < (rte_event_ring_get_capacity(
+			sw->ports[port].cq_worker_ring) >>
+			SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
		for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_port_bucket_stat,
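The switch above from reading cq_worker_ring->size to calling rte_event_ring_get_capacity() matters because the rings are now created with RING_F_EXACT_SZ: the usable capacity equals the depth requested at create time, while the ring's internal size is still rounded up to a power of two, so sizing the dequeue-depth stat buckets off ->size would over-count. A small sketch; deq_stat_buckets is an illustrative helper assuming SW_DEQ_STAT_BUCKET_SHIFT from sw_evdev.h.

static unsigned int
deq_stat_buckets(const struct rte_event_ring *r)
{
	/* one bucket per SW_DEQ_STAT_BUCKET_SHIFT-sized slice of the
	 * usable capacity, plus one so a full-capacity dequeue burst
	 * still lands in a valid bucket */
	return (rte_event_ring_get_capacity(r) >>
			SW_DEQ_STAT_BUCKET_SHIFT) + 1;
}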