bus/dpaa: support static queues

DPAA hardware supports two kinds of queues:
1. Pull mode queue - where one needs to regularly pull the packets.
2. Push mode queue - where the hardware pushes the packets to the queue.
   These are high-performance queues, but limited in number.

This patch adds driver support for push mode queues.
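For orientation, a minimal sketch of how a driver loop might service each
kind of queue with the qman API this patch touches (qman_portal_poll_rx()
for push mode, qman_dequeue()/qman_dqrr_consume() for pull mode). The burst
size and the processing stubs are illustrative only, not part of this patch:

#include <fsl_qman.h>		/* qman API from this bus driver */

#define EXAMPLE_BURST 32	/* hypothetical burst size */

static void example_rx(struct qman_portal *portal, struct qman_fq *pull_fq)
{
	void *bufs[EXAMPLE_BURST];
	struct qm_dqrr_entry *dq;
	unsigned int i, num;

	/* Push mode: QMan has already delivered frames into the portal's
	 * DQRR ring; this call walks the ring and fires
	 * fq->cb.dqrr_dpdk_cb for each entry.
	 */
	num = qman_portal_poll_rx(EXAMPLE_BURST, bufs, portal);
	for (i = 0; i < num; i++) {
		/* bufs[i] holds a received buffer; hand it to the app */
	}

	/* Pull mode: explicitly dequeue a frame, then consume the entry. */
	dq = qman_dequeue(pull_fq);
	if (dq) {
		/* process dq->fd here */
		qman_dqrr_consume(pull_fq, dq);
	}
}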

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Author:    Hemant Agrawal <hemant.agrawal@nxp.com>
Date:      2018-01-10 16:16:39 +05:30
Committer: Ferruh Yigit
Commit:    f56488258a (parent 83c82e15e1)

4 changed files with 83 additions and 3 deletions


@@ -1051,6 +1051,70 @@ u16 qman_affine_channel(int cpu)
 	return affine_channels[cpu];
 }
 
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+				 void **bufs,
+				 struct qman_portal *p)
+{
+	const struct qm_dqrr_entry *dq;
+	struct qman_fq *fq;
+	enum qman_cb_dqrr_result res;
+	unsigned int limit = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	struct qm_dqrr_entry *shadow;
+#endif
+	unsigned int rx_number = 0;
+
+	do {
+		qm_dqrr_pvb_update(&p->p);
+		dq = qm_dqrr_current(&p->p);
+		if (unlikely(!dq))
+			break;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+		/* If running on an LE system the fields of the
+		 * dequeue entry must be swapped. Because the
+		 * QMan HW will ignore writes, the DQRR entry is
+		 * copied and the index stored within the copy.
+		 */
+		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+		*shadow = *dq;
+		dq = shadow;
+		shadow->fqid = be32_to_cpu(shadow->fqid);
+		shadow->contextB = be32_to_cpu(shadow->contextB);
+		shadow->seqnum = be16_to_cpu(shadow->seqnum);
+		hw_fd_to_cpu(&shadow->fd);
+#endif
+
+		/* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+		fq = get_fq_table_entry(dq->contextB);
+#else
+		fq = (void *)(uintptr_t)dq->contextB;
+#endif
+
+		/* Now let the callback do its stuff */
+		res = fq->cb.dqrr_dpdk_cb(NULL, p, fq, dq, &bufs[rx_number]);
+		rx_number++;
+
+		/* Interpret 'dq' from a driver perspective. */
+		/*
+		 * Parking isn't possible unless HELDACTIVE was set. NB,
+		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+		 * check for HELDACTIVE to cover both.
+		 */
+		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+			    (res != qman_cb_dqrr_park));
+		qm_dqrr_cdc_consume_1ptr(&p->p, dq, res == qman_cb_dqrr_park);
+
+		/* Move forward */
+		qm_dqrr_next(&p->p);
+
+		/*
+		 * Entry processed and consumed, increment our counter. The
+		 * callback can request that we exit after consuming the
+		 * entry, and we also exit if we reach our processing limit,
+		 * so loop back only if neither of these conditions is met.
+		 */
+	} while (likely(++limit < poll_limit));
+
+	return limit;
+}
+
 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
 {
 	struct qman_portal *p = get_affine_portal();


@@ -154,7 +154,7 @@ struct qm_eqcr {
 };
 
 struct qm_dqrr {
-	const struct qm_dqrr_entry *ring, *cursor;
+	struct qm_dqrr_entry *ring, *cursor;
 	u8 pi, ci, fill, ithresh, vbit;
 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
 	enum qm_dqrr_dmode dmode;
@@ -441,7 +441,7 @@ static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
 	return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
 }
 
-static inline const struct qm_dqrr_entry *DQRR_INC(
+static inline struct qm_dqrr_entry *DQRR_INC(
 						const struct qm_dqrr_entry *e)
 {
 	return DQRR_CARRYCLEAR(e + 1);

@@ -1124,6 +1124,12 @@ typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
 					struct qman_fq *fq,
 					const struct qm_dqrr_entry *dqrr);
 
+typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
+					struct qman_portal *qm,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dqrr,
+					void **bd);
+
 /*
  * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
  * are always consumed after the callback returns.
@@ -1182,7 +1188,10 @@ enum qman_fq_state {
  */
 struct qman_fq_cb {
-	qman_cb_dqrr dqrr;	/* for dequeued frames */
+	union { /* for dequeued frames */
+		qman_dpdk_cb_dqrr dqrr_dpdk_cb;
+		qman_cb_dqrr dqrr;
+	};
 	qman_cb_mr ern;		/* for s/w ERNs */
 	qman_cb_mr fqs;		/* frame-queue state changes*/
 };
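For illustration, this is roughly how a PMD might populate the new union
member. The callback below is a sketch, not the dpaa PMD's actual callback;
a real implementation would translate the frame descriptor into an rte_mbuf:

#include <rte_common.h>		/* __rte_unused */
#include <fsl_qman.h>

/* Hypothetical push-mode callback: hand the dequeued entry back to the
 * caller of qman_portal_poll_rx() and let QMan consume the DQRR slot.
 */
static enum qman_cb_dqrr_result
example_rx_cb(void *event __rte_unused,
	      struct qman_portal *qm __rte_unused,
	      struct qman_fq *fq __rte_unused,
	      const struct qm_dqrr_entry *dqrr, void **bufs)
{
	*bufs = (void *)(uintptr_t)dqrr;	/* a real PMD builds an mbuf from dqrr->fd */
	return qman_cb_dqrr_consume;
}

/* Installed through the union before the FQ is scheduled:
 *	fq->cb.dqrr_dpdk_cb = example_rx_cb;
 */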
@@ -1299,6 +1308,9 @@ int qman_get_portal_index(void);
  */
 u16 qman_affine_channel(int cpu);
 
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+				 void **bufs, struct qman_portal *q);
+
 /**
  * qman_set_vdq - Issue a volatile dequeue command
  * @fq: Frame Queue on which the volatile dequeue command is issued


@@ -70,11 +70,15 @@ DPDK_18.02 {
 	dpaa_svr_family;
 	qman_alloc_cgrid_range;
+	qman_alloc_pool_range;
 	qman_create_cgr;
 	qman_delete_cgr;
 	qman_modify_cgr;
+	qman_oos_fq;
+	qman_portal_poll_rx;
 	qman_query_fq_frm_cnt;
 	qman_release_cgrid_range;
+	qman_retire_fq;
 	rte_dpaa_portal_fq_close;
 	rte_dpaa_portal_fq_init;