event/dpaa2: have separate structure to hold dqrr entries

This patch provides a cleaner approach to storing the DQRR entries
that are yet to be consumed in the case of atomic queues.

Also, this patch changes the storage of the DQRR entry index
to mbuf->seqn instead of ev->impl_opaque.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
This commit is contained in:
Nipun Gupta 2018-01-17 17:09:12 +05:30 committed by Jerin Jacob
parent 7c9e094fe5
commit 7b6edb640b
6 changed files with 36 additions and 14 deletions

View File

@ -35,6 +35,8 @@ rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type)
return rte_fslmc_bus.device_count[device_type];
}
RTE_DEFINE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
static void
cleanup_fslmc_device_list(void)
{

View File

@ -79,8 +79,6 @@ struct dpaa2_dpio_dev {
struct rte_intr_handle intr_handle; /* Interrupt related info */
int32_t epoll_fd; /**< File descriptor created for interrupt polling */
int32_t hw_id; /**< An unique ID of this DPIO device instance */
uint64_t dqrr_held;
uint8_t dqrr_size;
};
struct dpaa2_dpbp_dev {

View File

@ -95,6 +95,7 @@ DPDK_18.02 {
dpaa2_svr_family;
dpaa2_virt_mode;
per_lcore_dpaa2_held_bufs;
qbman_fq_query_state;
qbman_fq_state_frame_count;
qbman_swp_dqrr_idx_consume;

View File

@ -129,6 +129,24 @@ struct rte_fslmc_bus {
/**< Count of all devices scanned */
};
/* Maximum DQRR entries tracked per portal; sizes the per-lcore mbuf array below */
#define DPAA2_PORTAL_DEQUEUE_DEPTH 32
/* Create storage for dqrr entries per lcore */
struct dpaa2_portal_dqrr {
/* mbuf associated with each held DQRR entry, indexed by DQRR index */
struct rte_mbuf *mbuf[DPAA2_PORTAL_DEQUEUE_DEPTH];
/* bitmask of currently-held DQRR indexes (bit i set => index i held) */
uint64_t dqrr_held;
/* number of DQRR entries currently held */
uint8_t dqrr_size;
};
/* Per-lcore instance; the definition (RTE_DEFINE_PER_LCORE) lives in the bus .c file */
RTE_DECLARE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
/* Convenience accessors for the current lcore's held-DQRR state */
#define DPAA2_PER_LCORE_DQRR_SIZE \
RTE_PER_LCORE(dpaa2_held_bufs).dqrr_size
#define DPAA2_PER_LCORE_DQRR_HELD \
RTE_PER_LCORE(dpaa2_held_bufs).dqrr_held
#define DPAA2_PER_LCORE_DQRR_MBUF(i) \
RTE_PER_LCORE(dpaa2_held_bufs).mbuf[i]
/**
* Register a DPAA2 driver.
*

View File

@ -99,13 +99,13 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
if (event->impl_opaque) {
uint8_t dqrr_index = event->impl_opaque - 1;
if (event->mbuf->seqn) {
uint8_t dqrr_index = event->mbuf->seqn - 1;
qbman_eq_desc_set_dca(&eqdesc[loop], 1,
dqrr_index, 0);
DPAA2_PER_LCORE_DPIO->dqrr_size--;
DPAA2_PER_LCORE_DPIO->dqrr_held &=
DPAA2_PER_LCORE_DQRR_SIZE--;
DPAA2_PER_LCORE_DQRR_HELD &=
~(1 << dqrr_index);
}
@ -207,9 +207,9 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
rte_free(ev_temp);
ev->impl_opaque = dqrr_index + 1;
DPAA2_PER_LCORE_DPIO->dqrr_size++;
DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
ev->mbuf->seqn = dqrr_index + 1;
DPAA2_PER_LCORE_DQRR_SIZE++;
DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
}
static uint16_t
@ -231,18 +231,19 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
return 0;
}
}
swp = DPAA2_PER_LCORE_PORTAL;
/* Check if there are atomic contexts to be released */
while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
while (DPAA2_PER_LCORE_DQRR_SIZE) {
if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
qbman_swp_dqrr_idx_consume(swp, i);
DPAA2_PER_LCORE_DPIO->dqrr_size--;
DPAA2_PER_LCORE_DQRR_SIZE--;
DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
DPAA2_INVALID_MBUF_SEQN;
}
i++;
}
DPAA2_PER_LCORE_DPIO->dqrr_held = 0;
DPAA2_PER_LCORE_DQRR_HELD = 0;
do {
dq = qbman_swp_dqrr_next(swp);

View File

@ -10,6 +10,8 @@
#define DPAA2_MAX_BUF_POOLS 8
/* Sentinel mbuf->seqn value: no DQRR entry is held for this mbuf
 * (seqn stores dqrr_index + 1 when an entry is held, so 0 is free)
 */
#define DPAA2_INVALID_MBUF_SEQN 0
struct buf_pool_cfg {
void *addr;
/**< The address from where DPAA2 will carve out the buffers */