net/ice: optimize Tx by using AVX512

Optimize the Tx path by using AVX512 instructions and vectorize the
Tx free buffers process.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Leyi Rong authored on 2020-10-23 12:14:07 +08:00, committed by Ferruh Yigit
commit a4e480de26, parent cda28b1f59
3 changed files with 158 additions and 29 deletions
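
The core of the change is the new ice_tx_free_bufs_avx512() routine in the second file below: on the mbuf-fast-free path it copies completed mbuf pointers into the per-lcore mempool cache with 512-bit loads and stores instead of a scalar loop. A minimal standalone sketch of that copy pattern follows; the function and variable names are illustrative (not the driver's), and it assumes both arrays hold plain 8-byte pointers and that n is a multiple of 32, as the driver's fast-free branch guarantees.

#include <immintrin.h>
#include <stdint.h>

/* Sketch: copy n pointers (n a multiple of 32) using four 512-bit
 * registers per iteration, eight 8-byte pointers per register.
 */
static inline void
copy_ptrs_avx512(void **dst, void * const *src, uint32_t n)
{
    uint32_t copied = 0;

    while (copied < n) {
        const __m512i a = _mm512_loadu_si512(&src[copied]);
        const __m512i b = _mm512_loadu_si512(&src[copied + 8]);
        const __m512i c = _mm512_loadu_si512(&src[copied + 16]);
        const __m512i d = _mm512_loadu_si512(&src[copied + 24]);

        _mm512_storeu_si512(&dst[copied], a);
        _mm512_storeu_si512(&dst[copied + 8], b);
        _mm512_storeu_si512(&dst[copied + 16], c);
        _mm512_storeu_si512(&dst[copied + 24], d);
        copied += 32;
    }
}

In the driver the source is the Tx S/W ring of ice_vec_tx_entry elements and the destination is cache->objs at the current cache length, as the diff shows.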


@@ -96,6 +96,10 @@ struct ice_tx_entry {
uint16_t last_id;
};
struct ice_vec_tx_entry {
struct rte_mbuf *mbuf;
};
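/* A vector Tx entry holds only the mbuf pointer, so eight 8-byte entries
 * fill one 512-bit register; the AVX512 Tx free path relies on this
 * packed layout.
 */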
struct ice_tx_queue {
uint16_t nb_tx_desc; /* number of TX descriptors */
rte_iova_t tx_ring_dma; /* TX ring DMA address */


@@ -756,6 +756,108 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_pkts + retval, nb_pkts);
}
static __rte_always_inline int
ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
{
struct ice_vec_tx_entry *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
return 0;
n = txq->tx_rs_thresh;
/* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh - 1)
*/
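/* The AVX512 path reuses the Tx S/W ring storage as a packed array of
 * ice_vec_tx_entry (one mbuf pointer per slot), hence the cast and the
 * pointer arithmetic in ice_vec_tx_entry units below.
 */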
txep = (void *)txq->sw_ring;
txep += txq->tx_next_dd - (n - 1);
if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
void **cache_objs = &cache->objs[cache->len];
if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
goto done;
}
/* The cache follows the following algorithm
* 1. Add the objects to the cache
* 2. Anything greater than the cache min value (if it
* crosses the cache flush threshold) is flushed to the ring.
*/
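/* Worked example with illustrative values (not taken from the driver):
 * with cache->size = 256 and cache->flushthresh = 384, adding n = 32
 * entries to a cache already holding 360 gives len = 392 >= 384, so the
 * 392 - 256 = 136 objects above cache->size are flushed back to the ring
 * and len returns to 256.
 */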
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
while (copied < n) {
const __m512i a = _mm512_loadu_si512(&txep[copied]);
const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
_mm512_storeu_si512(&cache_objs[copied], a);
_mm512_storeu_si512(&cache_objs[copied + 8], b);
_mm512_storeu_si512(&cache_objs[copied + 16], c);
_mm512_storeu_si512(&cache_objs[copied + 24], d);
copied += 32;
}
cache->len += n;
if (cache->len >= cache->flushthresh) {
rte_mempool_ops_enqueue_bulk
(mp, &cache->objs[cache->size],
cache->len - cache->size);
cache->len = cache->size;
}
goto done;
}
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m)) {
if (likely(m->pool == free[0]->pool)) {
free[nb_free++] = m;
} else {
rte_mempool_put_bulk(free[0]->pool,
(void *)free,
nb_free);
free[0] = m;
nb_free = 1;
}
}
}
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m)
rte_mempool_put(m->pool, m);
}
}
done:
/* buffers were freed, update counters */
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
if (txq->tx_next_dd >= txq->nb_tx_desc)
txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
return txq->tx_rs_thresh;
}
static inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
@@ -777,13 +879,6 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
((uint64_t)flags << ICE_TXD_QW1_CMD_S));
/* if unaligned on 32-bit boundary, do one to align */
if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
ice_vtx1(txdp, *pkt, flags);
nb_pkts--, txdp++, pkt++;
}
/* do two at a time while possible, in bursts */
for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
uint64_t hi_qw3 =
hi_qw_tmpl |
@@ -802,20 +897,17 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
((uint64_t)pkt[0]->data_len <<
ICE_TXD_QW1_TX_BUF_SZ_S);
__m256i desc2_3 =
_mm256_set_epi64x
(hi_qw3,
pkt[3]->buf_iova + pkt[3]->data_off,
hi_qw2,
pkt[2]->buf_iova + pkt[2]->data_off);
__m256i desc0_1 =
_mm256_set_epi64x
(hi_qw1,
pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0,
pkt[0]->buf_iova + pkt[0]->data_off);
_mm256_store_si256((void *)(txdp + 2), desc2_3);
_mm256_store_si256((void *)txdp, desc0_1);
__m512i desc0_3 =
_mm512_set_epi64
(hi_qw3,
pkt[3]->buf_iova + pkt[3]->data_off,
hi_qw2,
pkt[2]->buf_iova + pkt[2]->data_off,
hi_qw1,
pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0,
pkt[0]->buf_iova + pkt[0]->data_off);
_mm512_storeu_si512((void *)txdp, desc0_3);
}
/* do any last ones */
@@ -825,13 +917,23 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
}
}
static __rte_always_inline void
ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
for (i = 0; i < (int)nb_pkts; ++i)
txep[i].mbuf = tx_pkts[i];
}
static inline uint16_t
ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
volatile struct ice_tx_desc *txdp;
struct ice_tx_entry *txep;
struct ice_vec_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = ICE_TD_CMD;
uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
@@ -840,7 +942,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
ice_tx_free_bufs(txq);
ice_tx_free_bufs_avx512(txq);
nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -848,13 +950,14 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
ice_tx_backlog_entry(txep, tx_pkts, n);
ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
ice_vtx(txdp, tx_pkts, n - 1, flags);
tx_pkts += (n - 1);
@@ -868,11 +971,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
txdp = &txq->tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txdp = txq->tx_ring;
txep = (void *)txq->sw_ring;
}
ice_tx_backlog_entry(txep, tx_pkts, nb_commit);
ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
ice_vtx(txdp, tx_pkts, nb_commit, flags);


@@ -189,16 +189,38 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
* so need to free remains more carefully.
*/
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
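/* Note: when ice_xmit_pkts_vec_avx512 is the active burst function, the
 * S/W ring holds packed ice_vec_tx_entry elements and must be walked
 * through that layout when freeing the remaining mbufs.
 */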
if (txq->tx_tail < i) {
for (; i < txq->nb_tx_desc; i++) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
i = 0;
}
for (; i < txq->tx_tail; i++) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
#ifdef CC_AVX512_SUPPORT
struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
if (txq->tx_tail < i) {
for (; i < txq->nb_tx_desc; i++) {
rte_pktmbuf_free_seg(swr[i].mbuf);
swr[i].mbuf = NULL;
}
i = 0;
}
for (; i < txq->tx_tail; i++) {
rte_pktmbuf_free_seg(swr[i].mbuf);
swr[i].mbuf = NULL;
}
} else
#endif
{
if (txq->tx_tail < i) {
for (; i < txq->nb_tx_desc; i++) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
i = 0;
}
for (; i < txq->tx_tail; i++) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
}
}