diff --git a/MAINTAINERS b/MAINTAINERS
index 1bcb25af18..e47563988e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -373,6 +373,7 @@ F: doc/guides/nics/nfp.rst
 NXP dpaa2
 M: Hemant Agrawal
 F: drivers/bus/fslmc/
+F: drivers/mempool/dpaa2/
 
 QLogic bnx2x
 M: Harish Patil
diff --git a/config/common_base b/config/common_base
index 94f1a60c7f..ab0750bdce 100644
--- a/config/common_base
+++ b/config/common_base
@@ -303,6 +303,11 @@ CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
 #
 CONFIG_RTE_LIBRTE_FSLMC_BUS=n
 
+#
+# Compile Support Libraries for NXP DPAA2
+#
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n
+
 #
 # Compile burst-oriented VIRTIO PMD driver
 #
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 365ae5ad6b..0c280d86f4 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -42,6 +42,14 @@ CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd"
 CONFIG_RTE_MAX_LCORE=8
 CONFIG_RTE_MAX_NUMA_NODES=1
 
+CONFIG_RTE_PKTMBUF_HEADROOM=256
+
+#
+# Compile Support Libraries for DPAA2
+#
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=y
+CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2"
+
 #
 # Compile NXP DPAA2 FSL-MC Bus
 #
diff --git a/drivers/Makefile b/drivers/Makefile
index fbc2351a30..19459fdab6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -33,6 +33,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 
 DIRS-y += bus
 DIRS-y += mempool
+DEPDIRS-mempool := bus
 DIRS-y += net
 DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
 DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index 0c6c45cb6b..8fd40e1b14 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -33,6 +33,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
 
 core-libs := librte_eal librte_mempool librte_ring
 
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+DEPDIRS-dpaa2 = $(core-libs)
 DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
 DEPDIRS-ring = $(core-libs)
 DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
new file mode 100644
index 0000000000..26ccebdefb
--- /dev/null
+++ b/drivers/mempool/dpaa2/Makefile
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 NXP. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+# * Neither the name of NXP nor the names of its
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa2.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa2_version.map
+
+# library version
+LIBABIVER := 1
+
+# all source files are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
+
+LDLIBS += -lrte_bus_fslmc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
new file mode 100644
index 0000000000..5a5d6aaa77
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -0,0 +1,373 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include <fslmc_logs.h>
+#include <mc/fsl_dpbp.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_hw_mempool.h"
+
+struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+static struct dpaa2_bp_list *h_bp_list;
+
+static int
+rte_hw_mbuf_create_pool(struct rte_mempool *mp)
+{
+	struct dpaa2_bp_list *bp_list;
+	struct dpaa2_dpbp_dev *avail_dpbp;
+	struct dpbp_attr dpbp_attr;
+	uint32_t bpid;
+	int ret, p_ret;
+
+	avail_dpbp = dpaa2_alloc_dpbp_dev();
+
+	if (!avail_dpbp) {
+		PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+		return -ENOENT;
+	}
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+			/* return the DPBP device so that it can be reused */
+			dpaa2_free_dpbp_dev(avail_dpbp);
+			return ret;
+		}
+	}
+
+	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Resource enable failure with"
+			" err code: %d\n", ret);
+		dpaa2_free_dpbp_dev(avail_dpbp);
+		return ret;
+	}
+
+	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
+				  avail_dpbp->token, &dpbp_attr);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Resource read failure with"
+			" err code: %d\n", ret);
+		p_ret = ret;
+		ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
+				   avail_dpbp->token);
+		dpaa2_free_dpbp_dev(avail_dpbp);
+		return p_ret;
+	}
+
+	/* Allocate the bp_list which will be added into global_bp_list */
+	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
+			     RTE_CACHE_LINE_SIZE);
+	if (!bp_list) {
+		PMD_INIT_LOG(ERR, "No heap memory available");
+		dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
+			     avail_dpbp->token);
+		dpaa2_free_dpbp_dev(avail_dpbp);
+		return -ENOMEM;
+	}
+
+	/* Set parameters of buffer pool list */
+	bp_list->buf_pool.num_bufs = mp->size;
+	bp_list->buf_pool.size = mp->elt_size
+			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
+	bp_list->buf_pool.bpid = dpbp_attr.bpid;
+	bp_list->buf_pool.h_bpool_mem = NULL;
+	bp_list->buf_pool.dpbp_node = avail_dpbp;
+	/* Identification for our offloaded pool_data structure */
+	bp_list->dpaa2_ops_index = mp->ops_index;
+	bp_list->next = h_bp_list;
+	bp_list->mp = mp;
+
+	bpid = dpbp_attr.bpid;
+
+	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+				+ rte_pktmbuf_priv_size(mp);
+	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
+	rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+	mp->pool_data = (void *)&rte_dpaa2_bpid_info[bpid];
+
+	PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+
+	h_bp_list = bp_list;
+	return 0;
+}
+
+static void
+rte_hw_mbuf_free_pool(struct rte_mempool *mp)
+{
+	struct dpaa2_bp_info *bpinfo;
+	struct dpaa2_bp_list *bp;
+	struct dpaa2_dpbp_dev *dpbp_node;
+
+	if (!mp->pool_data) {
+		PMD_DRV_LOG(ERR, "Not a valid dpaa2 buffer pool");
+		return;
+	}
+
+	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
+	bp = bpinfo->bp_list;
+	dpbp_node = bp->buf_pool.dpbp_node;
+
+	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
+
+	/* Unlink the pool from the global list; the node was allocated
+	 * with rte_malloc(), so it must be freed with rte_free().
+	 */
+	if (h_bp_list == bp) {
+		h_bp_list = h_bp_list->next;
+		rte_free(bp);
+	} else { /* if it is not the first node */
+		struct dpaa2_bp_list *prev = h_bp_list, *temp;
+
+		temp = h_bp_list->next;
+		while (temp) {
+			if (temp == bp) {
+				prev->next = temp->next;
+				rte_free(bp);
+				break;
+			}
+			prev = temp;
+			temp = temp->next;
+		}
+	}
+
+	dpaa2_free_dpbp_dev(dpbp_node);
+}
+
+static void
+rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
+			void * const *obj_table,
+			uint32_t bpid,
+			uint32_t meta_data_size,
+			int count)
+{
+	struct qbman_release_desc releasedesc;
+	struct qbman_swp *swp;
+	int ret;
+	int i, n;
+	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret != 0) {
+			RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+			return;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	/* Create a release descriptor required for releasing
+	 * buffers into QBMAN
+	 */
+	qbman_release_desc_clear(&releasedesc);
+	qbman_release_desc_set_bpid(&releasedesc, bpid);
+
+	n = count % DPAA2_MBUF_MAX_ACQ_REL;
+	if (unlikely(!n))
+		goto aligned;
+
+	/* convert mbufs to buffer addresses for the unaligned remainder */
+	for (i = 0; i < n; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+				+ meta_data_size;
+#else
+		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
+#endif
+	}
+
+	/* feed them to QBMAN */
+	do {
+		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
+	} while (ret == -EBUSY);
+
+aligned:
+	/* if there are more buffers to free */
+	while (n < count) {
+		/* convert mbufs to buffer addresses */
+		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+			bufs[i] = (uint64_t)
+				rte_mempool_virt2phy(pool, obj_table[n + i])
+					+ meta_data_size;
+#else
+			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
+#endif
+		}
+
+		do {
+			ret = qbman_swp_release(swp, &releasedesc, bufs,
+						DPAA2_MBUF_MAX_ACQ_REL);
+		} while (ret == -EBUSY);
+		n += DPAA2_MBUF_MAX_ACQ_REL;
+	}
+}
+
+int
+rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+			  void **obj_table, unsigned int count)
+{
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+	static int alloc;
+#endif
+	struct qbman_swp *swp;
+	uint16_t bpid;
+	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+	int i, ret;
+	unsigned int n = 0;
+	struct dpaa2_bp_info *bp_info;
+
+	bp_info = mempool_to_bpinfo(pool);
+
+	if (!(bp_info->bp_list)) {
+		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+		return -ENOENT;
+	}
+
+	bpid = bp_info->bpid;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret != 0) {
+			RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+			return ret;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (n < count) {
+		/* Acquire is all-or-nothing, so pull buffers from QBMAN in
+		 * batches of DPAA2_MBUF_MAX_ACQ_REL (7), then the remainder.
+		 */
+		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
+			ret = qbman_swp_acquire(swp, bpid, bufs,
+						DPAA2_MBUF_MAX_ACQ_REL);
+		} else {
+			ret = qbman_swp_acquire(swp, bpid, bufs,
+						count - n);
+		}
+		/* If fewer buffers are available in the pool than requested,
+		 * qbman_swp_acquire() returns 0.
+		 */
+		if (ret <= 0) {
+			PMD_TX_LOG(ERR, "Buffer acquire failed with"
+				   " err code: %d", ret);
+			/* The API expects the exact number of requested
+			 * buffers, so release everything acquired so far.
+			 */
+			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
+					       bp_info->meta_data_size, n);
+			/* Do not report success when the pool has simply run
+			 * dry; map the zero return to a proper error code.
+			 */
+			return ret ? ret : -ENOBUFS;
+		}
+		/* assign mbufs from the acquired objects */
+		for (i = 0; (i < ret) && bufs[i]; i++) {
+			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+			obj_table[n] = (struct rte_mbuf *)
+					(bufs[i] - bp_info->meta_data_size);
+			PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+				   (void *)bufs[i], (void *)obj_table[n]);
+			n++;
+		}
+	}
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+	alloc += n;
+	PMD_TX_LOG(DEBUG, "Total = %d, req = %d, done = %d",
+		   alloc, count, n);
+#endif
+	return 0;
+}
+
+static int
+rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
+		      void * const *obj_table, unsigned int n)
+{
+	struct dpaa2_bp_info *bp_info;
+
+	bp_info = mempool_to_bpinfo(pool);
+	if (!(bp_info->bp_list)) {
+		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+		return -ENOENT;
+	}
+	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
+			       bp_info->meta_data_size, n);
+
+	return 0;
+}
+
+static unsigned int
+rte_hw_mbuf_get_count(const struct rte_mempool *mp)
+{
+	int ret;
+	unsigned int num_of_bufs = 0;
+	struct dpaa2_bp_info *bp_info;
+	struct dpaa2_dpbp_dev *dpbp_node;
+
+	if (!mp || !mp->pool_data) {
+		RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
+		return 0;
+	}
+
+	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
+	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
+
+	ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
+				     dpbp_node->token, &num_of_bufs);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
+			ret);
+		return 0;
+	}
+
+	RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
+
+	return num_of_bufs;
+}
+
+struct rte_mempool_ops dpaa2_mpool_ops = {
+	.name = "dpaa2",
+	.alloc = rte_hw_mbuf_create_pool,
+	.free = rte_hw_mbuf_free_pool,
+	.enqueue = rte_hw_mbuf_free_bulk,
+	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
+	.get_count = rte_hw_mbuf_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
new file mode 100644
index 0000000000..c4d7fc02ef
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -0,0 +1,91 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_HW_MEMPOOL_H_
+#define _DPAA2_HW_MEMPOOL_H_
+
+#define DPAA2_MAX_BUF_POOLS	8
+
+struct buf_pool_cfg {
+	void *addr;
+	/**< The address from where DPAA2 will carve out the buffers */
+	phys_addr_t phys_addr;
+	/**< Physical address of the memory provided in addr */
+	uint32_t num;
+	/**< Number of buffers */
+	uint32_t size;
+	/**< Size including headroom for each buffer */
+	uint16_t align;
+	/**< Buffer alignment (in bytes) */
+	uint16_t bpid;
+	/**< Autogenerated buffer pool ID for internal use */
+};
+
+struct buf_pool {
+	uint32_t size; /**< Size of the pool */
+	uint32_t num_bufs; /**< Number of buffers in the pool */
+	uint16_t bpid; /**< Pool ID, from pool configuration */
+	uint8_t *h_bpool_mem; /**< Internal context data */
+	struct dpaa2_dpbp_dev *dpbp_node; /**< Hardware context */
+};
+
+/*!
+ * Buffer pool list configuration structure. The user must supply a valid
+ * configuration for each buffer pool in the list.
+ */
+struct dpaa2_bp_list_cfg {
+	struct buf_pool_cfg buf_pool; /* Configuration of each buffer pool */
+};
+
+struct dpaa2_bp_list {
+	struct dpaa2_bp_list *next;
+	struct rte_mempool *mp; /**< DPDK RTE EAL pool reference */
+	int32_t dpaa2_ops_index; /**< Index into the DPDK mempool ops table */
+	struct buf_pool buf_pool;
+};
+
+struct dpaa2_bp_info {
+	uint32_t meta_data_size;
+	uint32_t bpid;
+	struct dpaa2_bp_list *bp_list;
+};
+
+#define mempool_to_bpinfo(mp) ((struct dpaa2_bp_info *)(mp)->pool_data)
+#define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid)
+
+extern struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+
+int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+			      void **obj_table, unsigned int count);
+
+#endif /* _DPAA2_HW_MEMPOOL_H_ */
diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
new file mode 100644
index 0000000000..a8aa685c37
--- /dev/null
+++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -0,0 +1,8 @@
+DPDK_17.05 {
+	global:
+
+	rte_dpaa2_bpid_info;
+	rte_dpaa2_mbuf_alloc_bulk;
+
+	local: *;
+};
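
For context, a minimal usage sketch (not part of the patch): the ops registered above are reached through the standard mempool ops API. With CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2", as set in the defconfig hunk, a plain rte_pktmbuf_pool_create() already resolves to these ops; the explicit rte_mempool_set_ops_byname() form below is only needed when "dpaa2" is not the compiled-in default. The helper name, pool name, and sizing values are illustrative assumptions, not anything this patch defines.

#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical helper; pool name and sizes are placeholders. */
static struct rte_mempool *
dpaa2_pktmbuf_pool_create(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("mbuf_pool", 8192,
				      sizeof(struct rte_mbuf) +
				      RTE_MBUF_DEFAULT_BUF_SIZE,
				      256, /* per-lcore cache size */
				      sizeof(struct rte_pktmbuf_pool_private),
				      rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	/* Bind the pool to the hardware-offloaded "dpaa2" ops before
	 * populating it; enqueue/dequeue then translate to QBMAN
	 * release/acquire on a DPBP buffer pool.
	 */
	if (rte_mempool_set_ops_byname(mp, "dpaa2", NULL) != 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	rte_pktmbuf_pool_init(mp, NULL);
	if (rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);

	return mp;
}

Because enqueue and dequeue map to hardware release and acquire on a shared DPBP pool, buffers freed by one lcore (or by the hardware itself) become visible to every consumer of the pool without a software ring in between.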