mbuf: make segment prefree function public

Document the function and make it public, since it is used in several
places in the drivers. The old one is marked as deprecated.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
commit 54e9290269
parent 27c270bc40
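All of the driver hunks below share the same TX-completion pattern: call the prefree helper on each transmitted segment, and return the segments that reached their last reference to their mempool, batched per pool. The following is a minimal sketch of that pattern using the now-public rte_pktmbuf_prefree_seg(); the function name tx_bulk_free(), the batch size of 32 and the use of RTE_DIM() are illustrative assumptions, not part of the patch.

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative only: free up to 'num' transmitted mbufs, batching the
 * mempool puts per pool, as the patched drivers below do. */
static inline void
tx_bulk_free(struct rte_mbuf **txep, unsigned int num)
{
	struct rte_mbuf *free[32];
	struct rte_mbuf *m;
	unsigned int i, nb_free = 0;

	for (i = 0; i < num; i++) {
		/* Drop one reference; the mbuf is returned only when this
		 * was the last reference and it can go back to its pool. */
		m = rte_pktmbuf_prefree_seg(txep[i]);
		txep[i] = NULL;
		if (m == NULL)
			continue;

		/* Flush the batch when it is full or the pool changes. */
		if (nb_free == RTE_DIM(free) ||
		    (nb_free != 0 && m->pool != free[0]->pool)) {
			rte_mempool_put_bulk(free[0]->pool,
					     (void **)free, nb_free);
			nb_free = 0;
		}
		free[nb_free++] = m;
	}

	if (nb_free != 0)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
}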
@@ -473,7 +473,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 	pool = ((struct rte_mbuf *)buf->mb)->pool;
 	for (i = 0; i < nb_to_free; i++) {
 		buf = &wq->bufs[tail_idx];
-		m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
+		m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
 		buf->mb = NULL;
 
 		if (unlikely(m == NULL)) {
@@ -434,12 +434,12 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
 	if (unlikely(num == 0))
 		return;
 
-	m = __rte_pktmbuf_prefree_seg(txep[0]);
+	m = rte_pktmbuf_prefree_seg(txep[0]);
 	if (likely(m != NULL)) {
 		free[0] = m;
 		nb_free = 1;
 		for (i = 1; i < num; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (likely(m != NULL)) {
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
@@ -455,7 +455,7 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
 	} else {
 		for (i = 1; i < num; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (m != NULL)
 				rte_mempool_put(m->pool, m);
 			txep[i] = NULL;
@@ -754,12 +754,12 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
 	 * next_dd - (rs_thresh-1)
 	 */
 	txep = &txq->sw_ring[txq->next_dd - (n - 1)];
-	m = __rte_pktmbuf_prefree_seg(txep[0]);
+	m = rte_pktmbuf_prefree_seg(txep[0]);
 	if (likely(m != NULL)) {
 		free[0] = m;
 		nb_free = 1;
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (likely(m != NULL)) {
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
@@ -774,7 +774,7 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
 	} else {
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (m != NULL)
 				rte_mempool_put(m->pool, m);
 		}
@@ -123,12 +123,12 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	 * tx_next_dd - (tx_rs_thresh-1)
 	 */
 	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
-	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
 	if (likely(m != NULL)) {
 		free[0] = m;
 		nb_free = 1;
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
 			if (likely(m != NULL)) {
 				if (likely(m->pool == free[0]->pool)) {
 					free[nb_free++] = m;
@@ -144,7 +144,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
 	} else {
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
 			if (m != NULL)
 				rte_mempool_put(m->pool, m);
 		}
@@ -147,7 +147,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 
 	for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 		/* free buffers one at a time */
-		m = __rte_pktmbuf_prefree_seg(txep->mbuf);
+		m = rte_pktmbuf_prefree_seg(txep->mbuf);
 		txep->mbuf = NULL;
 
 		if (unlikely(m == NULL))
@@ -123,12 +123,12 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 	 * tx_next_dd - (tx_rs_thresh-1)
 	 */
 	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
-	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
 	if (likely(m != NULL)) {
 		free[0] = m;
 		nb_free = 1;
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
 			if (likely(m != NULL)) {
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
@@ -143,7 +143,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
 	} else {
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
 			if (m != NULL)
 				rte_mempool_put(m->pool, m);
 		}
@@ -98,13 +98,13 @@ virtio_xmit_cleanup(struct virtqueue *vq)
 	desc_idx = (uint16_t)(vq->vq_used_cons_idx &
 		   ((vq->vq_nentries >> 1) - 1));
 	m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
-	m = __rte_pktmbuf_prefree_seg(m);
+	m = rte_pktmbuf_prefree_seg(m);
 	if (likely(m != NULL)) {
 		free[0] = m;
 		nb_free = 1;
 		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
 			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
-			m = __rte_pktmbuf_prefree_seg(m);
+			m = rte_pktmbuf_prefree_seg(m);
 			if (likely(m != NULL)) {
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
@@ -123,7 +123,7 @@ virtio_xmit_cleanup(struct virtqueue *vq)
 	} else {
 		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
 			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
-			m = __rte_pktmbuf_prefree_seg(m);
+			m = rte_pktmbuf_prefree_seg(m);
 			if (m != NULL)
 				rte_mempool_put(m->pool, m);
 		}
@@ -1220,8 +1220,23 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
 		__rte_mbuf_raw_free(md);
 }
 
-static inline struct rte_mbuf* __attribute__((always_inline))
-__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+/**
+ * Decrease reference counter and unlink a mbuf segment
+ *
+ * This function does the same as a free, except that it does not
+ * return the segment to its pool.
+ * It decreases the reference counter, and if it reaches 0, it is
+ * detached from its parent for an indirect mbuf.
+ *
+ * @param m
+ *   The mbuf to be unlinked
+ * @return
+ *   - (m) if it is the last reference. It can be recycled or freed.
+ *   - (NULL) if the mbuf still has remaining references on it.
+ */
+__attribute__((always_inline))
+static inline struct rte_mbuf *
+rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
 {
 	__rte_mbuf_sanity_check(m, 0);
 
@@ -1234,6 +1249,14 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
 	return NULL;
 }
 
+/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
+__rte_deprecated
+static inline struct rte_mbuf *
+__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+{
+	return rte_pktmbuf_prefree_seg(m);
+}
+
 /**
  * Free a segment of a packet mbuf into its original mempool.
  *
@@ -1246,7 +1269,8 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
 static inline void __attribute__((always_inline))
 rte_pktmbuf_free_seg(struct rte_mbuf *m)
 {
-	if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
+	m = rte_pktmbuf_prefree_seg(m);
+	if (likely(m != NULL)) {
 		m->next = NULL;
 		__rte_mbuf_raw_free(m);
 	}
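For callers outside the mbuf library, a hedged sketch of what the documented return contract enables: freeing a chained packet segment by segment. The helper name free_pkt_segwise() is an illustrative assumption; the patch's own rte_pktmbuf_free_seg() uses the internal __rte_mbuf_raw_free(), whereas this sketch hands segments back through rte_mempool_put(), as the drivers above do.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative only: walk a segment chain and return to its mempool
 * every segment whose last reference was just dropped. */
static inline void
free_pkt_segwise(struct rte_mbuf *m)
{
	struct rte_mbuf *next, *seg;

	while (m != NULL) {
		next = m->next;
		seg = rte_pktmbuf_prefree_seg(m);
		if (seg != NULL) {
			/* Last reference: unlink and give it back. */
			seg->next = NULL;
			rte_mempool_put(seg->pool, seg);
		}
		/* A NULL return means other references remain; skip it. */
		m = next;
	}
}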