bus/fslmc: add dpio portal driver

The portal driver is bound to DPIO objects discovered on the fsl-mc bus and
provides services that:
- allow other drivers, such as the Ethernet driver, to enqueue and dequeue
  frames for their respective objects

A system will typically allocate 1 DPIO object per CPU to allow queuing
operations to happen simultaneously across all CPUs.
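As a rough usage sketch (illustration only, not part of this patch; the
lcore_io_loop() name is hypothetical), each I/O lcore binds one of these
portals once before issuing any queuing operations:

#include <rte_lcore.h>
#include <rte_log.h>
#include "dpaa2_hw_dpio.h"	/* header added by this patch */

static int
lcore_io_loop(void *arg)
{
	(void)arg;
	/* Bind a free DPIO portal to this lcore (one portal per CPU). */
	if (dpaa2_affine_qbman_swp()) {
		RTE_LOG(ERR, PMD, "No DPIO portal for lcore %u\n",
			rte_lcore_id());
		return -1;
	}
	/* DPAA2_PER_LCORE_PORTAL now points at this lcore's QBMan
	 * software portal and can be used for enqueue/dequeue commands.
	 */
	return 0;
}

Such a function would typically be launched on every worker lcore with
rte_eal_remote_launch().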

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Author:    Hemant Agrawal <hemant.agrawal@nxp.com>
Date:      2017-04-11 19:07:17 +0530
Committer: Ferruh Yigit
Commit:    5374e50f10 (parent 52d4f4c765)
7 files changed, 520 insertions(+), 1 deletion(-)

drivers/bus/fslmc/Makefile

@@ -67,6 +67,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
mc/dpio.c \
mc/mc_sys.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_vfio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_bus.c

drivers/bus/fslmc/fslmc_vfio.c

@@ -61,6 +61,9 @@
#include "rte_fslmc.h"
#include "fslmc_vfio.h"
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
#define VFIO_MAX_CONTAINERS 1
#define FSLMC_VFIO_LOG(level, fmt, args...) \
@@ -261,12 +264,13 @@ int fslmc_vfio_process_group(void)
struct fslmc_vfio_device *vdev;
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
char *temp_obj, *object_type, *mcp_obj, *dev_name;
int32_t object_id, i, dev_fd;
int32_t object_id, i, dev_fd, ret;
DIR *d;
struct dirent *dir;
char path[PATH_MAX];
int64_t v_addr;
int ndev_count;
int dpio_count = 0;
struct fslmc_vfio_group *group = &vfio_groups[0];
static int process_once;
@@ -410,9 +414,20 @@ int fslmc_vfio_process_group(void)
fslmc_bus_add_device(dev);
}
if (!strcmp(object_type, "dpio")) {
ret = dpaa2_create_dpio_device(vdev,
&device_info,
object_id);
if (!ret)
dpio_count++;
}
}
closedir(d);
ret = dpaa2_affine_qbman_swp();
if (ret)
FSLMC_VFIO_LOG(DEBUG, "Error in affining qbman swp %d", ret);
return 0;
FAILURE:

drivers/bus/fslmc/fslmc_vfio.h

@@ -71,4 +71,9 @@ int vfio_dmamap_mem_region(
int fslmc_vfio_setup_group(void);
int fslmc_vfio_process_group(void);
/* create dpio device */
int dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
struct vfio_device_info *obj_info,
int object_id);
#endif /* _FSLMC_VFIO_H_ */

drivers/bus/fslmc/portal/dpaa2_hw_dpio.c

@@ -0,0 +1,368 @@
/*-
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright (c) 2016 NXP. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Freescale Semiconductor, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <stdarg.h>
#include <inttypes.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include "dpaa2_hw_pvt.h"
#include "dpaa2_hw_dpio.h"
#define NUM_HOST_CPUS RTE_MAX_LCORE
struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
static uint32_t io_space_count;
/* Stashing macros: defaults for LS208x */
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;
/* On the LS208x platform there are four clusters with the following mapping:
* Cluster 1 (ID = x04) : CPU0, CPU1;
* Cluster 2 (ID = x05) : CPU2, CPU3;
* Cluster 3 (ID = x06) : CPU4, CPU5;
* Cluster 4 (ID = x07) : CPU6, CPU7;
*/
/* On the LS108x platform there are two clusters with the following mapping:
* Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
* Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
*/
/* Set the stashing destination (SDEST) based on the current CPU ID.
 * e.g. on LS208x the valid SDEST values are 4, 5, 6 and 7:
 * CPUs 0-1 get SDEST 4,
 * CPUs 2-3 get SDEST 5, and so on.
*/
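/* Worked example with the LS208x defaults above (base 0x04, cluster
 * size 2): cpu_id 5 -> 5 / 2 = 2 -> SDEST 0x04 + 2 = 0x06, i.e. the
 * cluster holding CPU4/CPU5. Any cpu_id beyond the last cluster is
 * clamped to the highest SDEST (0x07 here).
 */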
static int
dpaa2_core_cluster_sdest(int cpu_id)
{
int x = cpu_id / dpaa2_cluster_sz;
if (x > 3)
x = 3;
return dpaa2_core_cluster_base + x;
}
static int
configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
{
struct qbman_swp_desc p_des;
struct dpio_attr attr;
dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
if (!dpio_dev->dpio) {
PMD_INIT_LOG(ERR, "Memory allocation failure\n");
return -1;
}
PMD_DRV_LOG(DEBUG, "\t Allocated DPIO Portal[%p]", dpio_dev->dpio);
dpio_dev->dpio->regs = dpio_dev->mc_portal;
if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
&dpio_dev->token)) {
PMD_INIT_LOG(ERR, "Failed to allocate IO space\n");
free(dpio_dev->dpio);
return -1;
}
if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
PMD_INIT_LOG(ERR, "Failed to reset dpio\n");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
PMD_INIT_LOG(ERR, "Failed to Enable dpio\n");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, &attr)) {
PMD_INIT_LOG(ERR, "DPIO Get attribute failed\n");
dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
PMD_INIT_LOG(DEBUG, "Qbman Portal ID %d", attr.qbman_portal_id);
PMD_INIT_LOG(DEBUG, "Portal CE adr 0x%lX", attr.qbman_portal_ce_offset);
PMD_INIT_LOG(DEBUG, "Portal CI adr 0x%lX", attr.qbman_portal_ci_offset);
/* Configure & setup SW portal */
p_des.block = NULL;
p_des.idx = attr.qbman_portal_id;
p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
p_des.irq = -1;
p_des.qman_version = attr.qbman_version;
dpio_dev->sw_portal = qbman_swp_init(&p_des);
if (dpio_dev->sw_portal == NULL) {
PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
PMD_INIT_LOG(DEBUG, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);
return 0;
}
static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
{
int sdest;
int cpu_id, ret;
/* Set the Stashing Destination */
cpu_id = rte_lcore_id();
if (cpu_id < 0) {
cpu_id = rte_get_master_lcore();
if (cpu_id < 0) {
RTE_LOG(ERR, PMD, "\tGetting CPU Index failed\n");
return -1;
}
}
/* Set the stashing destination (SDEST) based on the current CPU ID.
 * Valid values of SDEST are 4, 5, 6 and 7:
 * CPUs 0-1 get SDEST 4,
 * CPUs 2-3 get SDEST 5, and so on.
*/
sdest = dpaa2_core_cluster_sdest(cpu_id);
PMD_DRV_LOG(DEBUG, "Portal= %d CPU= %u SDEST= %d",
dpio_dev->index, cpu_id, sdest);
ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, sdest);
if (ret) {
PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret);
return -1;
}
return 0;
}
static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
{
struct dpaa2_dpio_dev *dpio_dev = NULL;
int ret;
/* Get DPIO dev handle from list using index */
TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
break;
}
if (!dpio_dev)
return NULL;
PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu",
dpio_dev, dpio_dev->index, syscall(SYS_gettid));
ret = dpaa2_configure_stashing(dpio_dev);
if (ret)
PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed");
return dpio_dev;
}
int
dpaa2_affine_qbman_swp(void)
{
unsigned int lcore_id = rte_lcore_id();
uint64_t tid = syscall(SYS_gettid);
if (lcore_id == LCORE_ID_ANY)
lcore_id = rte_get_master_lcore();
/* if the core id is not supported */
else if (lcore_id >= RTE_MAX_LCORE)
return -1;
if (dpaa2_io_portal[lcore_id].dpio_dev) {
PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
" between thread %lu and current %lu",
dpaa2_io_portal[lcore_id].dpio_dev,
dpaa2_io_portal[lcore_id].dpio_dev->index,
dpaa2_io_portal[lcore_id].net_tid,
tid);
RTE_PER_LCORE(_dpaa2_io).dpio_dev
= dpaa2_io_portal[lcore_id].dpio_dev;
rte_atomic16_inc(&dpaa2_io_portal
[lcore_id].dpio_dev->ref_count);
dpaa2_io_portal[lcore_id].net_tid = tid;
PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
dpaa2_io_portal[lcore_id].dpio_dev,
dpaa2_io_portal[lcore_id].dpio_dev->index,
tid);
return 0;
}
/* Populate the dpaa2_io_portal structure */
dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp();
if (dpaa2_io_portal[lcore_id].dpio_dev) {
RTE_PER_LCORE(_dpaa2_io).dpio_dev
= dpaa2_io_portal[lcore_id].dpio_dev;
dpaa2_io_portal[lcore_id].net_tid = tid;
return 0;
} else {
return -1;
}
}
int
dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
struct vfio_device_info *obj_info,
int object_id)
{
struct dpaa2_dpio_dev *dpio_dev;
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
"of DPIO regions.\n");
return -1;
}
if (!dpio_dev_list) {
dpio_dev_list = malloc(sizeof(struct dpio_device_list));
if (!dpio_dev_list) {
PMD_INIT_LOG(ERR, "Memory alloc failed in DPIO list\n");
return -1;
}
/* Initialize the DPIO List */
TAILQ_INIT(dpio_dev_list);
}
dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev));
if (!dpio_dev) {
PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n");
return -1;
}
PMD_DRV_LOG(INFO, "\t Allocated DPIO [%p]", dpio_dev);
dpio_dev->dpio = NULL;
dpio_dev->hw_id = object_id;
dpio_dev->vfio_fd = vdev->fd;
rte_atomic16_init(&dpio_dev->ref_count);
/* Using single portal for all devices */
dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
reg_info.index = 0;
if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
free(dpio_dev);
return -1;
}
PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
dpio_dev->ce_size = reg_info.size;
dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
dpio_dev->vfio_fd, reg_info.offset);
/* Create a mapping for the QBMan cache-enabled area. This is a fix for
 * the SMMU fault seen on DQRR stashing transactions.
*/
if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr,
reg_info.offset, reg_info.size)) {
PMD_INIT_LOG(ERR, "DMAMAP for Portal CE area failed.\n");
free(dpio_dev);
return -1;
}
reg_info.index = 1;
if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
free(dpio_dev);
return -1;
}
PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
dpio_dev->ci_size = reg_info.size;
dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
dpio_dev->vfio_fd, reg_info.offset);
if (configure_dpio_qbman_swp(dpio_dev)) {
PMD_INIT_LOG(ERR,
"Fail to configure the dpio qbman portal for %d\n",
dpio_dev->hw_id);
free(dpio_dev);
return -1;
}
io_space_count++;
dpio_dev->index = io_space_count;
TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next);
return 0;
}

drivers/bus/fslmc/portal/dpaa2_hw_dpio.h

@@ -0,0 +1,60 @@
/*-
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright (c) 2016 NXP. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Freescale Semiconductor, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA2_HW_DPIO_H_
#define _DPAA2_HW_DPIO_H_
#include <mc/fsl_dpio.h>
#include <mc/fsl_mc_sys.h>
struct dpaa2_io_portal_t {
struct dpaa2_dpio_dev *dpio_dev;
struct dpaa2_dpio_dev *sec_dpio_dev;
uint64_t net_tid;
uint64_t sec_tid;
};
/*! Global per thread DPIO portal */
RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
#define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev
#define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal
#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
/* Affine a DPIO portal to current processing thread */
int dpaa2_affine_qbman_swp(void);
#endif /* _DPAA2_HW_DPIO_H_ */
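A minimal sketch (illustration only, not part of this patch) of how a
fast-path routine might consume these per-lcore macros; the
dpaa2_get_portal() helper name is hypothetical, and dpaa2_hw_pvt.h plus
dpaa2_hw_dpio.h are assumed to be included:

static inline struct qbman_swp *
dpaa2_get_portal(void)
{
	/* Affine a portal the first time this lcore touches the hardware. */
	if (!DPAA2_PER_LCORE_DPIO) {
		if (dpaa2_affine_qbman_swp())
			return NULL;
	}
	/* Per-lcore QBMan software portal, ready for enqueue/dequeue. */
	return DPAA2_PER_LCORE_PORTAL;
}

Keeping the affinement lazy lets control-plane threads that never touch
the data path avoid consuming one of the limited DPIO portals.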

drivers/bus/fslmc/portal/dpaa2_hw_pvt.h

@@ -0,0 +1,68 @@
/*-
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright (c) 2016 NXP. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Freescale Semiconductor, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA2_HW_PVT_H_
#define _DPAA2_HW_PVT_H_
#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
struct dpaa2_dpio_dev {
TAILQ_ENTRY(dpaa2_dpio_dev) next;
/**< Pointer to Next device instance */
uint16_t index; /**< Index of an instance in the list */
rte_atomic16_t ref_count;
/**< How many thread contexts are sharing this.*/
struct fsl_mc_io *dpio; /**< Handle to DPIO portal object */
uint16_t token;
struct qbman_swp *sw_portal; /**< SW portal object */
const struct qbman_result *dqrr[4];
/**< DQRR Entry for this SW portal */
void *mc_portal; /**< MC Portal for configuring this device */
uintptr_t qbman_portal_ce_paddr;
/**< Physical address of Cache Enabled Area */
uintptr_t ce_size; /**< Size of the CE region */
uintptr_t qbman_portal_ci_paddr;
/**< Physical address of Cache Inhibit Area */
uintptr_t ci_size; /**< Size of the CI region */
int32_t vfio_fd; /**< File descriptor received via VFIO */
int32_t hw_id; /**< A unique ID of this DPIO device instance */
};
/*! Global MCP list */
extern void *(*rte_mcp_ptr_list);
#endif

drivers/bus/fslmc/rte_bus_fslmc_version.map

@@ -1,6 +1,7 @@
DPDK_17.05 {
global:
dpaa2_affine_qbman_swp;
dpbp_disable;
dpbp_enable;
dpbp_get_attributes;
@@ -15,6 +16,7 @@ DPDK_17.05 {
dpio_reset;
dpio_set_stashing_destination;
mc_send_command;
per_lcore__dpaa2_io;
qbman_check_command_complete;
qbman_eq_desc_clear;
qbman_eq_desc_set_no_orp;