mbuf: add raw allocation function

Many drivers duplicate the same private wrapper around the internal
__rte_mbuf_raw_alloc(). Introduce a new public function in rte_mbuf,
rte_mbuf_raw_alloc(), to allocate a raw (uninitialized) mbuf, and keep
the old name as a deprecated compatibility alias.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Author:    Olivier Matz <olivier.matz@6wind.com>
Date:      2016-05-11 16:43:46 +02:00
Committer: Thomas Monjalon
Commit:    fbfd99551c
Parent:    8e483216fd

 17 files changed, 50 insertions(+), 185 deletions(-)
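
For context, a minimal usage sketch (not part of this commit; the helper name
and field choices are hypothetical): rte_mbuf_raw_alloc() hands back an mbuf
with only its reference count set, so an RX path must initialize every field
it relies on, per the header comment added below.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical RX refill helper: allocate a raw (uninitialized) mbuf
 * and set the fields a typical driver needs before posting it to the
 * hardware ring. See rte_pktmbuf_reset() for the full field list. */
static struct rte_mbuf *
example_rx_refill(struct rte_mempool *mp, uint8_t port_id)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m == NULL)
		return NULL;	/* mempool exhausted */
	m->data_off = RTE_PKTMBUF_HEADROOM;	/* caller-side initialization */
	m->nb_segs = 1;
	m->next = NULL;
	m->port = port_id;
	return m;
}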

@@ -89,17 +89,6 @@ static struct ether_addr cfg_ether_dst =
 #define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
 #define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
-static inline struct rte_mbuf *
-tx_mbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 static inline uint16_t
 ip_sum(const unaligned_uint16_t *hdr, int hdr_len)
 {
@@ -167,7 +156,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	ol_flags = ports[fs->tx_port].tx_ol_flags;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
-		pkt = tx_mbuf_alloc(mbp);
+		pkt = rte_mbuf_raw_alloc(mbp);
 		if (!pkt)
 			break;

@@ -86,16 +86,6 @@
 static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
 static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
 
-static inline struct rte_mbuf *
-tx_mbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 static void
 copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
 		     unsigned offset)
@@ -225,7 +215,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
-		pkt = tx_mbuf_alloc(mbp);
+		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
 		nomore_mbuf:
 			if (nb_pkt == 0)
@@ -240,7 +230,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		nb_segs = tx_pkt_nb_segs;
 		pkt_len = pkt->data_len;
 		for (i = 1; i < nb_segs; i++) {
-			pkt_seg->next = tx_mbuf_alloc(mbp);
+			pkt_seg->next = rte_mbuf_raw_alloc(mbp);
 			if (pkt_seg->next == NULL) {
 				pkt->nb_segs = i;
 				rte_pktmbuf_free(pkt);

@@ -11,17 +11,6 @@
 #include "bnx2x.h"
 #include "bnx2x_rxtx.h"
 
-static inline struct rte_mbuf *
-bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check(m, 0);
-	return m;
-}
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -148,7 +137,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	/* Initialize software ring entries */
 	rxq->rx_mbuf_alloc = 0;
 	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
-		mbuf = bnx2x_rxmbuf_alloc(mp);
+		mbuf = rte_mbuf_raw_alloc(mp);
 		if (NULL == mbuf) {
 			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
 				   (unsigned)rxq->queue_id, idx);
@@ -405,7 +394,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		len = cqe_fp->pkt_len_or_gro_seg_len;
 		pad = cqe_fp->placement_offset;
 
-		new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
+		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (unlikely(!new_mb)) {
 			PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
 			goto next_rx;

@@ -78,16 +78,6 @@
 
 #define E1000_RXDCTL_GRAN	0x01000000 /* RXDCTL Granularity */
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -729,7 +719,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) status,
 			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u",
@@ -909,7 +899,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) status,
 			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1561,7 +1551,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 	/* Initialize software ring entries */
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile struct e1000_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "

@@ -79,16 +79,6 @@
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG)
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -838,7 +828,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1021,7 +1011,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1957,7 +1947,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 	/* Initialize software ring entries. */
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union e1000_adv_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "

@@ -60,17 +60,6 @@
 #include "vnic_nic.h"
 #include "enic_vnic_wq.h"
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 static inline int enic_is_sriov_vf(struct enic *enic)
 {
 	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
@@ -347,7 +336,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 		rq->ring.desc_count);
 
 	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
-		mb = rte_rxmbuf_alloc(rq->mp);
+		mb = rte_mbuf_raw_alloc(rq->mp);
 		if (mb == NULL) {
 			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
 				(unsigned)rq->index);

@@ -57,16 +57,6 @@
 #define rte_packet_prefetch(p) do {} while (0)
 #endif
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 static inline uint16_t
 enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
 {
@@ -283,7 +273,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
 
 		/* allocate a new mbuf */
-		nmb = rte_rxmbuf_alloc(rq->mp);
+		nmb = rte_mbuf_raw_alloc(rq->mp);
 		if (nmb == NULL) {
 			dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
 				enic->port_id, (unsigned)rq->index);

@@ -841,17 +841,6 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
 	}
 }
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1225,7 +1214,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
-		nmb = rte_rxmbuf_alloc(rxq->mp);
+		nmb = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(!nmb))
 			break;
 		rxd = *rxdp;
@@ -1336,7 +1325,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
 		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
-		nmb = rte_rxmbuf_alloc(rxq->mp);
+		nmb = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(!nmb))
 			break;
 		rxd = *rxdp;
@@ -2774,7 +2763,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
 
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union i40e_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
 
 		if (unlikely(!mbuf)) {
 			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");

@@ -88,17 +88,6 @@
 		PKT_TX_TCP_SEG |		 \
 		PKT_TX_OUTER_IP_CKSUM)
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 #if 1
 #define RTE_PMD_USE_PREFETCH
 #endif
@@ -1609,7 +1598,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1880,7 +1869,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 			   rte_le_to_cpu_16(rxd.wb.upper.length));
 
 		if (!bulk_alloc) {
-			nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+			nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 			if (nmb == NULL) {
 				PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
 					   "port_id=%u queue_id=%u",
@@ -3861,7 +3850,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 	/* Initialize software ring entries */
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union ixgbe_adv_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",

@@ -3075,7 +3075,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			 * cacheline while allocating rep.
 			 */
 			rte_prefetch0(seg);
-			rep = __rte_mbuf_raw_alloc(rxq->mp);
+			rep = rte_mbuf_raw_alloc(rxq->mp);
 			if (unlikely(rep == NULL)) {
 				/*
 				 * Unable to allocate a replacement mbuf,
@@ -3274,7 +3274,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (ret == 0)
 			break;
 		len = ret;
-		rep = __rte_mbuf_raw_alloc(rxq->mp);
+		rep = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(rep == NULL)) {
 			/*
 			 * Unable to allocate a replacement mbuf,

@@ -929,7 +929,7 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			 * cacheline while allocating rep.
 			 */
 			rte_prefetch0(seg);
-			rep = __rte_mbuf_raw_alloc(rxq->mp);
+			rep = rte_mbuf_raw_alloc(rxq->mp);
 			if (unlikely(rep == NULL)) {
 				/*
 				 * Unable to allocate a replacement mbuf,
@@ -1125,7 +1125,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			break;
 		assert(ret >= (rxq->crc_present << 2));
 		len = ret - (rxq->crc_present << 2);
-		rep = __rte_mbuf_raw_alloc(rxq->mp);
+		rep = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(rep == NULL)) {
 			/*
 			 * Unable to allocate a replacement mbuf,

@@ -516,7 +516,7 @@ mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
 	int i;
 
 	for (i = 0; i < count; i++) {
-		mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
+		mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
 		if (!mbuf)
 			break;
 		mpipe_recv_push(priv, mbuf);
@@ -1452,7 +1452,7 @@ mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
 			    MPIPE_BUF_DEBT_THRESHOLD)
 				mpipe_local.mbuf_push_debt[in_port]++;
 			else {
-				mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
+				mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
 				if (unlikely(!mbuf)) {
 					nb_nomem++;
 					gxio_mpipe_iqueue_drop(iqueue, idesc);

@@ -10,17 +10,6 @@
 
 static bool gro_disable = 1;	/* mod_param */
 
-static inline struct
-rte_mbuf *qede_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check(m, 0);
-	return m;
-}
-
 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 {
 	struct rte_mbuf *new_mb = NULL;
@@ -28,7 +17,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 	dma_addr_t mapping;
 	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
 
-	new_mb = qede_rxmbuf_alloc(rxq->mb_pool);
+	new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
 	if (unlikely(!new_mb)) {
 		PMD_RX_LOG(ERR, rxq,
 			   "Failed to allocate rx buffer "

@@ -281,17 +281,6 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
 	vq_update_avail_ring(txvq, head_idx);
 }
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 static void
 virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
 {
@@ -343,7 +332,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
 			vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf;
 
 		while (!virtqueue_full(vq)) {
-			m = rte_rxmbuf_alloc(vq->mpool);
+			m = rte_mbuf_raw_alloc(vq->mpool);
 			if (m == NULL)
 				break;
@@ -658,7 +647,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	/* Allocate new mbuf for the used descriptor */
 	error = ENOSPC;
 	while (likely(!virtqueue_full(rxvq))) {
-		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
 		if (unlikely(new_mbuf == NULL)) {
 			struct rte_eth_dev *dev
 				= &rte_eth_devices[rxvq->port_id];
@@ -822,7 +811,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	/* Allocate new mbuf for the used descriptor */
 	error = ENOSPC;
 	while (likely(!virtqueue_full(rxvq))) {
-		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
 		if (unlikely(new_mbuf == NULL)) {
 			struct rte_eth_dev *dev
 				= &rte_eth_devices[rxvq->port_id];

@@ -86,16 +86,6 @@ static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
 static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
 #endif
 
-static struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
 static void
 vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
@@ -544,7 +534,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
 		rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
 
 		/* Allocate blank mbuf for the current Rx Descriptor */
-		mbuf = rte_rxmbuf_alloc(rxq->mp);
+		mbuf = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(mbuf == NULL)) {
 			PMD_RX_LOG(ERR, "Error allocating mbuf");
 			rxq->stats.rx_buf_alloc_failure++;

@@ -79,18 +79,6 @@ static struct rte_eth_link pmd_link = {
 static void
 eth_xenvirt_free_queues(struct rte_eth_dev *dev);
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
-
-
 static uint16_t
 eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -122,7 +110,7 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	}
 	/* allocate new mbuf for the used descriptor */
 	while (likely(!virtqueue_full(rxvq))) {
-		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
 		if (unlikely(new_mbuf == NULL)) {
 			break;
 		}
@@ -293,7 +281,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 
 	while (!virtqueue_full(rxvq)) {
-		m = rte_rxmbuf_alloc(rxvq->mpool);
+		m = rte_mbuf_raw_alloc(rxvq->mpool);
 		if (m == NULL)
 			break;
 		/* Enqueue allocated buffers. */

@@ -932,20 +932,11 @@ struct rte_pktmbuf_pool_private {
 
 /** check mbuf type in debug mode */
 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
 
-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h)	do {	\
-	if ((m) != NULL)				\
-		rte_mbuf_sanity_check(m, is_h);		\
-} while (0)
-
 #else /* RTE_LIBRTE_MBUF_DEBUG */
 
 /** check mbuf type in debug mode */
 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
 
-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
-
 #endif /* RTE_LIBRTE_MBUF_DEBUG */
 
 #ifdef RTE_MBUF_REFCNT_ATOMIC
@@ -1058,9 +1049,12 @@ void
 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
 
 /**
- * @internal Allocate a new mbuf from mempool *mp*.
- * The use of that function is reserved for RTE internal needs.
- * Please use rte_pktmbuf_alloc().
+ * Allocate an uninitialized mbuf from mempool *mp*.
+ *
+ * This function can be used by PMDs (especially in RX functions) to
+ * allocate an uninitialized mbuf. The driver is responsible for
+ * initializing all the required fields. See rte_pktmbuf_reset().
+ * For standard needs, prefer rte_pktmbuf_alloc().
  *
 * @param mp
 *   The mempool from which mbuf is allocated.
@@ -1068,18 +1062,28 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
  *   - The pointer to the new mbuf on success.
  *   - NULL if allocation failed.
  */
-static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
 {
 	struct rte_mbuf *m;
 	void *mb = NULL;
+
 	if (rte_mempool_get(mp, &mb) < 0)
 		return NULL;
 	m = (struct rte_mbuf *)mb;
 	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
 	rte_mbuf_refcnt_set(m, 1);
+	__rte_mbuf_sanity_check(m, 0);
+
 	return m;
 }
 
+/* compat with older versions */
+__rte_deprecated static inline struct rte_mbuf *
+__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+{
+	return rte_mbuf_raw_alloc(mp);
+}
+
 /**
  * @internal Put mbuf back into its original mempool.
  * The use of that function is reserved for RTE internal needs.
@@ -1343,7 +1347,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
 {
 	struct rte_mbuf *m;
-	if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
+	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
 		rte_pktmbuf_reset(m);
 	return m;
 }
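
As the final hunk makes explicit, the public allocators now layer cleanly:
rte_pktmbuf_alloc() is rte_mbuf_raw_alloc() followed by rte_pktmbuf_reset().
A sketch of that equivalence (assuming mp is a pool created with
rte_pktmbuf_pool_create()):

#include <rte_mbuf.h>

/* Allocating raw and resetting by hand yields the same result as
 * rte_pktmbuf_alloc(mp), as the hunk above shows. */
static struct rte_mbuf *
example_alloc_and_reset(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m != NULL)
		rte_pktmbuf_reset(m);
	return m;
}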