net/octeontx: support fast mbuf free

This patch adds the capability for fast release of mbufs
following successful transmission.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
Author:    Harman Kalra <hkalra@marvell.com>
Date:      2020-03-16 15:03:39 +05:30
Committer: Ferruh Yigit
commit 5cbe184802
parent 7f4116bdbb
4 changed files with 99 additions and 10 deletions
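
For context, an application opts into this feature at device configuration time; the PMD then honours the offload as implemented below. A minimal sketch of the application side (not part of this patch), using the DPDK 20.02-era offload names and assuming a single Rx/Tx queue:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: request fast mbuf free on the Tx path. The offload is only
 * safe when all mbufs on the queue come from one mempool and are never
 * reference-counted, which is what allows the PKO hardware to free
 * them without per-buffer checks.
 */
static int
enable_fast_free(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	memset(&conf, 0, sizeof(conf));
	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}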

File: drivers/net/octeontx/octeontx_ethdev.c

@@ -267,7 +267,9 @@ octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
uint16_t flags = 0;
/* Created function for supporting future offloads */
if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
flags |= OCCTX_TX_MULTI_SEG_F;
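
Note the inverted sense of the new flag: OCCTX_TX_OFFLOAD_MBUF_NOFF_F ("no fast free") is set when the application did NOT request DEV_TX_OFFLOAD_MBUF_FAST_FREE, steering transmission onto the per-mbuf prefree path added later in this patch. Combined with the multi-seg flag this yields four Tx modes (function names taken from the fastpath table at the end of the patch):

/* Illustrative summary, not part of the patch:
 *
 *   fast free requested | multi-seg | selected burst function
 *   --------------------+-----------+------------------------------
 *   yes                 | no        | octeontx_xmit_pkts_no_offload
 *   yes                 | yes       | octeontx_xmit_pkts_mseg
 *   no                  | no        | octeontx_xmit_pkts_noff
 *   no                  | yes       | octeontx_xmit_pkts_noff_mseg
 */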

File: drivers/net/octeontx/octeontx_ethdev.h

@@ -34,6 +34,7 @@
DEV_RX_OFFLOAD_JUMBO_FRAME)
#define OCTEONTX_TX_OFFLOADS (DEV_TX_OFFLOAD_MT_LOCKFREE | \
DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
DEV_TX_OFFLOAD_MULTI_SEGS)
static inline struct octeontx_nic *

File: drivers/net/octeontx/octeontx_rxtx.c

@@ -41,8 +41,8 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return count; /* return number of pkts received */
}
-#define T(name, f1, sz, flags) \
-static uint16_t __rte_noinline __rte_hot \
+#define T(name, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
octeontx_xmit_pkts_ ##name(void *tx_queue, \
struct rte_mbuf **tx_pkts, uint16_t pkts) \
{ \
@@ -60,14 +60,15 @@ octeontx_set_tx_function(struct rte_eth_dev *dev)
{
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-const eth_tx_burst_t tx_burst_func[2] = {
-#define T(name, f0, sz, flags) \
-[f0] = octeontx_xmit_pkts_ ##name,
+const eth_tx_burst_t tx_burst_func[2][2] = {
+#define T(name, f1, f0, sz, flags) \
+[f1][f0] = octeontx_xmit_pkts_ ##name,
OCCTX_TX_FASTPATH_MODES
#undef T
};
dev->tx_pkt_burst = tx_burst_func
[!!(nic->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
[!!(nic->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
}
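
The T macro gains an f1 (NOFF) argument and the dispatch table becomes two-dimensional, indexed by the NOFF and MULTI_SEG flag bits. For illustration, one entry expands roughly as below; the function body is elided by the hunk above, so the command-buffer details here are an assumption based on the sz argument:

/* Hypothetical expansion of T(noff_mseg, 1, 1, 14, NOFF_F | MULT_F) */
static uint16_t __rte_noinline __rte_hot
octeontx_xmit_pkts_noff_mseg(void *tx_queue,
			     struct rte_mbuf **tx_pkts, uint16_t pkts)
{
	uint64_t cmd_buf[14];	/* sz = 14 command words for mseg */

	return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd_buf,
				    NOFF_F | MULT_F);
}

/* ...registered in octeontx_set_tx_function() as:
 *	tx_burst_func[1][1] = octeontx_xmit_pkts_noff_mseg;
 */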

File: drivers/net/octeontx/octeontx_rxtx.h

@@ -18,6 +18,7 @@
#define OCCTX_RX_MULTI_SEG_F BIT(15)
#define OCCTX_TX_OFFLOAD_NONE (0)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
#define OCCTX_TX_MULTI_SEG_F BIT(15)
/* Packet type table */
@@ -110,9 +111,75 @@ ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
};
static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m)
{
struct rte_mempool *mp = m->pool;
uint32_t mbuf_size, buf_len;
struct rte_mbuf *md;
uint16_t priv_size;
uint16_t refcount;
/* Update refcount of direct mbuf */
md = rte_mbuf_from_indirect(m);
refcount = rte_mbuf_refcnt_update(md, -1);
priv_size = rte_pktmbuf_priv_size(mp);
mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
buf_len = rte_pktmbuf_data_room_size(mp);
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
m->buf_len = (uint16_t)buf_len;
rte_pktmbuf_reset_headroom(m);
m->data_len = 0;
m->ol_flags = 0;
m->next = NULL;
m->nb_segs = 1;
/* Now indirect mbuf is safe to free */
rte_pktmbuf_free(m);
if (refcount == 0) {
rte_mbuf_refcnt_set(md, 1);
md->data_len = 0;
md->ol_flags = 0;
md->next = NULL;
md->nb_segs = 1;
return 0;
} else {
return 1;
}
}
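
This helper mirrors the detach logic of librte_mbuf, but unlike rte_pktmbuf_detach() it also reports the fate of the direct mbuf: 0 means the last reference died and the buffer may be released by hardware, 1 means other references remain. A hedged walk-through of the two outcomes:

/* Illustrative only, not driver code. Assume the indirect mbuf 'm'
 * is attached to the direct mbuf 'md'.
 *
 * md refcnt == 2: rte_mbuf_refcnt_update(md, -1) returns 1, so
 *                 octeontx_pktmbuf_detach(m) returns 1 and PKO must
 *                 not free md's buffer.
 * md refcnt == 1: the update returns 0; md is re-initialized
 *                 (refcnt set back to 1, next NULL, nb_segs 1) and
 *                 the function returns 0, so the buffer can be
 *                 recycled to its pool by hardware.
 */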
static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m)
{
if (likely(rte_mbuf_refcnt_read(m) == 1)) {
if (!RTE_MBUF_DIRECT(m))
return octeontx_pktmbuf_detach(m);
m->next = NULL;
m->nb_segs = 1;
return 0;
} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
if (!RTE_MBUF_DIRECT(m))
return octeontx_pktmbuf_detach(m);
rte_mbuf_refcnt_set(m, 1);
m->next = NULL;
m->nb_segs = 1;
return 0;
}
/* Mbuf refcount is greater than 1, so it need not be freed */
return 1;
}
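
octeontx_prefree_seg() condenses the ownership question into a single "don't free" result for the descriptor: 0 when the mbuf is exclusively owned (after resetting next/nb_segs so the buffer can be reused as-is), 1 when other references keep it alive. The branches, summarized:

/* Decision summary (illustrative):
 * refcnt == 1, direct mbuf   -> reset next/nb_segs, return 0
 * refcnt == 1, indirect mbuf -> detach; result follows the direct mbuf
 * refcnt  > 1                -> decrement; if it reaches 0, reset and
 *                               return 0, otherwise return 1
 */

The two hunks that follow shift this result into the hardware send descriptors.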
static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
-const uint16_t flag __rte_unused)
+const uint16_t flag)
{
uint16_t gaura_id, nb_desc = 0;
@@ -120,6 +187,13 @@ __octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
cmd_buf[nb_desc++] = 0x0;
/* SEND_HDR[DF] bit controls if buffer is to be freed or
* not, as SG_DESC[I] and SEND_HDR[II] are clear.
*/
if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) <<
58);
/* Mark mempool object as "put" since it is freed by PKO */
if (!(cmd_buf[0] & (1ULL << 58)))
__mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
@@ -140,7 +214,7 @@ __octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
-const uint16_t flag __rte_unused)
+const uint16_t flag)
{
uint16_t nb_segs, nb_desc = 0;
uint16_t gaura_id, len = 0;
@@ -165,6 +239,14 @@ __octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
PKO_SEND_GATHER_GAUAR((long)gaura_id) |
tx_pkt->data_len;
/* SG_DESC[I] bit controls if buffer is to be freed or
* not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
*/
if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
cmd_buf[nb_desc] |=
(octeontx_prefree_seg(tx_pkt) << 57);
}
/* Mark mempool object as "put" since it is freed by
* PKO.
*/
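
Together the two insertions define the descriptor-level contract: for single-segment sends the decision rides in SEND_HDR[DF] (bit 58 of the first command word), while in gather mode each segment's SG descriptor carries its own I bit (bit 57). A recap, illustrative only:

/* single-seg: cmd_buf[0]       |= octeontx_prefree_seg(tx_pkt) << 58;  SEND_HDR[DF]
 * multi-seg:  cmd_buf[nb_desc] |= octeontx_prefree_seg(tx_pkt) << 57;  SG_DESC[I]
 *
 * A set bit tells PKO not to free the buffer (it is still referenced
 * elsewhere); a clear bit lets PKO return the buffer to its aura after
 * transmission, which is why the mempool cookie is marked "put" only
 * in the clear case.
 */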
@@ -218,10 +300,13 @@ __octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
#define NOFF_F OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F OCCTX_TX_MULTI_SEG_F
/* [NOFF] [MULTI_SEG] */
#define OCCTX_TX_FASTPATH_MODES \
-T(no_offload, 0, 4, OCCTX_TX_OFFLOAD_NONE) \
-T(mseg, 1, 14, MULT_F) \
+T(no_offload, 0, 0, 4, OCCTX_TX_OFFLOAD_NONE) \
+T(mseg, 0, 1, 14, MULT_F) \
+T(noff, 1, 0, 4, NOFF_F) \
+T(noff_mseg, 1, 1, 14, NOFF_F | MULT_F)
#endif /* __OCTEONTX_RXTX_H__ */