net/dpaa2: support low level loopback tester
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
@@ -499,6 +499,11 @@ for details.
     Done
     testpmd>
 
+* Use the dev arg option ``drv_loopback=1`` to loop back packets at
+  driver level. Any packet received will be reflected back by the
+  driver on the same port.
+
+
 Enabling logs
 -------------
 
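
The new option is passed per device on the fslmc bus. A possible testpmd invocation, assuming a DPAA2 platform whose port is named dpni.1 (the device name and core mask here are illustrative, not taken from this patch):

    ./testpmd -c 0x3 -n 1 -w fslmc:dpni.1,drv_loopback=1 -- -i

With the option set, the port's Rx burst handler reflects traffic inside the driver, so a tester only needs to send traffic toward the port and watch its counters.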
@@ -1039,6 +1039,24 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                              const struct qbman_fd *fd,
                              uint32_t *flags,
                              int num_frames);
+
+/**
+ * qbman_swp_enqueue_multiple_fd() - Enqueue multiple frames with same
+ * eq descriptor
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
+                                  const struct qbman_eq_desc *d,
+                                  struct qbman_fd **fd,
+                                  uint32_t *flags,
+                                  int num_frames);
+
 /**
  * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
  * individual eq descriptor.
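
qbman_swp_enqueue_multiple_fd() consumes at most as many frames as the EQCR has free slots and returns the number it accepted (note that the implementations below return 0, rather than -EBUSY, when no slot is free), so callers are expected to resubmit the remainder. A minimal caller fragment, assuming swp, eqdesc and an fd[] array holding num_rx descriptors are already prepared; it mirrors the retry loop in the loopback Rx path later in this patch:

    int num_tx = 0;

    /* Resubmit the unaccepted tail until every frame is enqueued */
    while (num_tx < num_rx) {
            num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
                            &fd[num_tx], NULL, num_rx - num_tx);
    }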
@@ -93,6 +93,20 @@ qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                uint32_t *flags,
                int num_frames);
 
+static int
+qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames);
+
+static int
+qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames);
+
 static int
 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
@@ -139,6 +153,13 @@ static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                int num_frames)
        = qbman_swp_enqueue_multiple_direct;
 
+static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames)
+       = qbman_swp_enqueue_multiple_fd_direct;
+
 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
@@ -243,6 +264,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
                        qbman_swp_enqueue_ring_mode_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                        qbman_swp_enqueue_multiple_mem_back;
+               qbman_swp_enqueue_multiple_fd_ptr =
+                       qbman_swp_enqueue_multiple_fd_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                        qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
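
The new _fd entry point follows the portal's existing dispatch scheme: a file-scope function pointer defaults to the _direct implementation and is re-pointed to the _mem_back one in qbman_swp_init() for portals that support memory-backed mode, so the per-burst datapath pays no branch. A self-contained sketch of that select-once, call-many pattern (all names and the revision check below are illustrative, not the QBMAN API):

    #include <stdio.h>

    struct portal { int rev; };

    /* Stand-ins for the two hardware access methods; each just
     * reports how many frames it would have written.
     */
    static int send_direct(struct portal *p, int n)   { (void)p; return n; }
    static int send_mem_back(struct portal *p, int n) { (void)p; return n; }

    /* Bound once at init time, then called on every burst with no branch. */
    static int (*send_ptr)(struct portal *, int) = send_direct;

    static void portal_init(struct portal *p)
    {
            if (p->rev >= 5000)     /* illustrative "memory-backed portal" check */
                    send_ptr = send_mem_back;
    }

    int main(void)
    {
            struct portal p = { .rev = 5000 };

            portal_init(&p);
            printf("enqueued %d frames\n", send_ptr(&p, 8));
            return 0;
    }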
@@ -862,6 +885,144 @@ inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
        return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
 }
 
+static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames)
+{
+       uint32_t *p = NULL;
+       const uint32_t *cl = qb_cl(d);
+       uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+       int i, num_enqueued = 0;
+       uint64_t addr_cena;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+                               QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                               eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return 0;
+       }
+
+       eqcr_pi = s->eqcr.pi;
+       num_enqueued = (s->eqcr.available < num_frames) ?
+                       s->eqcr.available : num_frames;
+       s->eqcr.available -= num_enqueued;
+       /* Fill in the EQCR ring */
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               memcpy(&p[1], &cl[1], 28);
+               memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
+               eqcr_pi++;
+       }
+
+       lwsync();
+
+       /* Set the verb byte, have to substitute in the valid-bit */
+       eqcr_pi = s->eqcr.pi;
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               p[0] = cl[0] | s->eqcr.pi_vb;
+               if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                       struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+                       d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                               ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+               }
+               eqcr_pi++;
+               if (!(eqcr_pi & half_mask))
+                       s->eqcr.pi_vb ^= QB_VALID_BIT;
+       }
+
+       /* Flush all the cacheline without load/store in between */
+       eqcr_pi = s->eqcr.pi;
+       addr_cena = (size_t)s->sys.addr_cena;
+       for (i = 0; i < num_enqueued; i++) {
+               dcbf(addr_cena +
+                       QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               eqcr_pi++;
+       }
+       s->eqcr.pi = eqcr_pi & full_mask;
+
+       return num_enqueued;
+}
+
+static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames)
+{
+       uint32_t *p = NULL;
+       const uint32_t *cl = qb_cl(d);
+       uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+       int i, num_enqueued = 0;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+                               QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                               eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return 0;
+       }
+
+       eqcr_pi = s->eqcr.pi;
+       num_enqueued = (s->eqcr.available < num_frames) ?
+                       s->eqcr.available : num_frames;
+       s->eqcr.available -= num_enqueued;
+       /* Fill in the EQCR ring */
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               memcpy(&p[1], &cl[1], 28);
+               memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
+               eqcr_pi++;
+       }
+
+       /* Set the verb byte, have to substitute in the valid-bit */
+       eqcr_pi = s->eqcr.pi;
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               p[0] = cl[0] | s->eqcr.pi_vb;
+               if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                       struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+                       d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                               ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+               }
+               eqcr_pi++;
+               if (!(eqcr_pi & half_mask))
+                       s->eqcr.pi_vb ^= QB_VALID_BIT;
+       }
+       s->eqcr.pi = eqcr_pi & full_mask;
+
+       dma_wmb();
+       qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+                       (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+       return num_enqueued;
+}
+
+inline int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames)
+{
+       return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
+}
+
 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
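
Both variants refresh s->eqcr.available only when it hits zero, by reading the hardware consumer index and taking the cyclic distance from the cached one. A sketch of the wraparound arithmetic that qm_cyc_diff() is assumed to perform (the real helper is defined elsewhere in the QBMAN code and may encode its indices differently; this only shows the ring idea):

    #include <assert.h>
    #include <stdint.h>

    /* Cyclic distance from index `first` to index `last` on a ring of
     * `ringsize` entries, i.e. how far the consumer has advanced.
     */
    static uint8_t cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
    {
            return (first <= last) ? (uint8_t)(last - first)
                                   : (uint8_t)(ringsize + last - first);
    }

    int main(void)
    {
            assert(cyc_diff(8, 2, 6) == 4);  /* no wrap */
            assert(cyc_diff(8, 6, 2) == 4);  /* consumer wrapped past the end */
            return 0;
    }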
@@ -141,5 +141,6 @@ DPDK_19.05 {
        qbman_result_eqresp_rc;
        qbman_result_eqresp_rspid;
        qbman_result_eqresp_set_rspid;
+       qbman_swp_enqueue_multiple_fd;
 } DPDK_18.11;
 
@@ -27,6 +27,8 @@
 #include "dpaa2_ethdev.h"
 #include <fsl_qbman_debug.h>
 
+#define DRIVER_LOOPBACK_MODE "drv_loopback"
+
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
                DEV_RX_OFFLOAD_VLAN_STRIP |
@@ -732,7 +734,8 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
                RTE_PTYPE_UNKNOWN
        };
 
-       if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
+       if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
+               dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
                return ptypes;
        return NULL;
 }
@@ -1997,6 +2000,43 @@ cleanup:
        return -1;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+                     __rte_unused void *opaque)
+{
+       if (strcmp(value, "1"))
+               return -1;
+
+       return 0;
+}
+
+static int
+dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+       struct rte_kvargs *kvlist;
+
+       if (!devargs)
+               return 0;
+
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
+       if (!kvlist)
+               return 0;
+
+       if (!rte_kvargs_count(kvlist, key)) {
+               rte_kvargs_free(kvlist);
+               return 0;
+       }
+
+       if (rte_kvargs_process(kvlist, key,
+                              check_devargs_handler, NULL) < 0) {
+               rte_kvargs_free(kvlist);
+               return 0;
+       }
+       rte_kvargs_free(kvlist);
+
+       return 1;
+}
+
 static int
 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 {
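
dpaa2_get_devargs() treats the option as a strict boolean: the key must be present and its value must be exactly "1". A self-contained sketch of the same rte_kvargs flow, run against a literal string instead of a probed device's devargs (the key and value are simply the ones this patch documents):

    #include <stdio.h>
    #include <string.h>
    #include <rte_kvargs.h>

    /* Accept the key only when its value is exactly "1",
     * mirroring check_devargs_handler() above.
     */
    static int
    match_one(const char *key, const char *value, void *opaque)
    {
            (void)key;
            (void)opaque;
            return strcmp(value, "1") ? -1 : 0;
    }

    int
    main(void)
    {
            struct rte_kvargs *kvlist = rte_kvargs_parse("drv_loopback=1", NULL);
            int on = 0;

            if (kvlist && rte_kvargs_count(kvlist, "drv_loopback") &&
                rte_kvargs_process(kvlist, "drv_loopback", match_one, NULL) == 0)
                    on = 1;
            rte_kvargs_free(kvlist);

            printf("loopback %s\n", on ? "enabled" : "disabled");
            return 0;
    }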
@@ -2016,7 +2056,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
         * plugged.
         */
        eth_dev->dev_ops = &dpaa2_ethdev_ops;
-       eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
+       if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
+               eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
+       else
+               eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
        return 0;
 }
@@ -2133,7 +2176,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
        eth_dev->dev_ops = &dpaa2_ethdev_ops;
 
-       eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
+       if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
+               eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
+               DPAA2_PMD_INFO("Loopback mode");
+       } else {
+               eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
+       }
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
        RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
@@ -2251,7 +2299,8 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
-
+RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
+               DRIVER_LOOPBACK_MODE "=<int>");
 RTE_INIT(dpaa2_pmd_init_log)
 {
        dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
@@ -125,6 +125,9 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id);
 
+uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
+                              uint16_t nb_pkts);
+
 uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
                               uint16_t nb_pkts);
 void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
@@ -1143,3 +1143,164 @@ dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        (void)nb_pkts;
        return 0;
 }
+
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#elif defined(RTE_TOOLCHAIN_CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#endif
+
+/* This function loops back all the received packets. */
+uint16_t
+dpaa2_dev_loopback_rx(void *queue,
+                     struct rte_mbuf **bufs __rte_unused,
+                     uint16_t nb_pkts)
+{
+       /* Function receives frames for a given device and VQ */
+       struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+       struct qbman_result *dq_storage, *dq_storage1 = NULL;
+       uint32_t fqid = dpaa2_q->fqid;
+       int ret, num_rx = 0, num_tx = 0, pull_size;
+       uint8_t pending, status;
+       struct qbman_swp *swp;
+       struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
+       struct qbman_pull_desc pulldesc;
+       struct qbman_eq_desc eqdesc;
+       struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+       struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+       struct dpaa2_dev_priv *priv = eth_data->dev_private;
+       struct dpaa2_queue *tx_q = priv->tx_vq[0];
+       /* todo - currently we are using 1st TX queue only for loopback */
+
+       if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
+               ret = dpaa2_affine_qbman_ethrx_swp();
+               if (ret) {
+                       DPAA2_PMD_ERR("Failure in affining portal");
+                       return 0;
+               }
+       }
+       swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
+       pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
+       if (unlikely(!q_storage->active_dqs)) {
+               q_storage->toggle = 0;
+               dq_storage = q_storage->dq_storage[q_storage->toggle];
+               q_storage->last_num_pkts = pull_size;
+               qbman_pull_desc_clear(&pulldesc);
+               qbman_pull_desc_set_numframes(&pulldesc,
+                                             q_storage->last_num_pkts);
+               qbman_pull_desc_set_fq(&pulldesc, fqid);
+               qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+                       (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+               if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
+                       while (!qbman_check_command_complete(
+                              get_swp_active_dqs(
+                              DPAA2_PER_LCORE_ETHRX_DPIO->index)))
+                               ;
+                       clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
+               }
+               while (1) {
+                       if (qbman_swp_pull(swp, &pulldesc)) {
+                               DPAA2_PMD_DP_DEBUG(
+                                       "VDQ command not issued. QBMAN busy\n");
+                               /* Portal was busy, try again */
+                               continue;
+                       }
+                       break;
+               }
+               q_storage->active_dqs = dq_storage;
+               q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+               set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
+                                  dq_storage);
+       }
+
+       dq_storage = q_storage->active_dqs;
+       rte_prefetch0((void *)(size_t)(dq_storage));
+       rte_prefetch0((void *)(size_t)(dq_storage + 1));
+
+       /* Prepare next pull descriptor. This will give space for the
+        * prefetching done on DQRR entries
+        */
+       q_storage->toggle ^= 1;
+       dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+       qbman_pull_desc_clear(&pulldesc);
+       qbman_pull_desc_set_numframes(&pulldesc, pull_size);
+       qbman_pull_desc_set_fq(&pulldesc, fqid);
+       qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+               (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+
+       /* Prepare enqueue descriptor */
+       qbman_eq_desc_clear(&eqdesc);
+       qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+       qbman_eq_desc_set_response(&eqdesc, 0, 0);
+       qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
+
+       /* Check if the previous issued command is completed.
+        * Also seems like the SWP is shared between the Ethernet Driver
+        * and the SEC driver.
+        */
+       while (!qbman_check_command_complete(dq_storage))
+               ;
+       if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
+               clear_swp_active_dqs(q_storage->active_dpio_id);
+
+       pending = 1;
+
+       do {
+               /* Loop until the dq_storage is updated with
+                * new token by QBMAN
+                */
+               while (!qbman_check_new_result(dq_storage))
+                       ;
+               rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+               /* Check whether Last Pull command is Expired and
+                * setting Condition for Loop termination
+                */
+               if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+                       pending = 0;
+                       /* Check for valid frame. */
+                       status = qbman_result_DQ_flags(dq_storage);
+                       if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
+                               continue;
+               }
+               fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
+
+               dq_storage++;
+               num_rx++;
+       } while (pending);
+
+       while (num_tx < num_rx) {
+               num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
+                               &fd[num_tx], 0, num_rx - num_tx);
+       }
+
+       if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
+               while (!qbman_check_command_complete(
+                      get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
+                       ;
+               clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
+       }
+       /* Issue a volatile dequeue command for next pull */
+       while (1) {
+               if (qbman_swp_pull(swp, &pulldesc)) {
+                       DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
+                                          "QBMAN is busy (2)\n");
+                       continue;
+               }
+               break;
+       }
+       q_storage->active_dqs = dq_storage1;
+       q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+       set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
+
+       dpaa2_q->rx_pkts += num_rx;
+       dpaa2_q->tx_pkts += num_tx;
+
+       return 0;
+}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC diagnostic pop
+#elif defined(RTE_TOOLCHAIN_CLANG)
+#pragma clang diagnostic pop
+#endif
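
Note that dpaa2_dev_loopback_rx() always returns 0 to its caller: the dequeued frame descriptors are re-enqueued to the port's first Tx queue (tx_vq[0]) entirely inside the driver, and only dpaa2_q->rx_pkts/tx_pkts record the traffic. The application that installed the handler therefore never touches the reflected frames, which appears to be what makes this a low-level loopback tester rather than a forwarding mode.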