numam-dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h
Feifei Wang be8ff62108 net/i40e: improve vector Tx performance
For the i40e vector Tx path, even when tx_offload is set to
MBUF_FAST_FREE mode, no mbuf fast free operations are executed. To fix
this, add mbuf fast free support to the vector Tx path.

Furthermore, MBUF_FAST_FREE mode guarantees that, per queue, all mbufs
come from the same mempool and have refcnt = 1. Thus the buffers can be
freed in bulk when mbuf fast free mode is enabled.

For the vector path on Arm platforms:
on N1SDP, performance is improved by 18.4%;
on ThunderX2, performance is improved by 23%.

For the vector path on x86 platforms:
no performance change.

Suggested-by: Ruifeng Wang <ruifeng.wang@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
2021-07-06 04:59:01 +02:00

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
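/*
 * Reassemble scattered Rx segments into complete packets.
 * rx_bufs holds nb_bufs segments; split_flags[i] is non-zero while more
 * segments of the same packet follow. Completed packets are compacted
 * back into rx_bufs and their count is returned; a packet still in
 * flight is parked in rxq->pkt_first_seg/pkt_last_seg for the next
 * call. CRC stripping for the whole packet is also finalized here.
 */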
static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
uint16_t nb_bufs, uint8_t *split_flags)
{
struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
struct rte_mbuf *start = rxq->pkt_first_seg;
struct rte_mbuf *end = rxq->pkt_last_seg;
unsigned pkt_idx, buf_idx;
for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
if (end != NULL) {
/* processing a split packet */
end->next = rx_bufs[buf_idx];
rx_bufs[buf_idx]->data_len += rxq->crc_len;
start->nb_segs++;
start->pkt_len += rx_bufs[buf_idx]->data_len;
end = end->next;
if (!split_flags[buf_idx]) {
/* it's the last packet of the set */
start->hash = end->hash;
start->vlan_tci = end->vlan_tci;
start->ol_flags = end->ol_flags;
/* we need to strip crc for the whole packet */
start->pkt_len -= rxq->crc_len;
if (end->data_len > rxq->crc_len)
end->data_len -= rxq->crc_len;
else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
start->nb_segs--;
while (secondlast->next != end)
secondlast = secondlast->next;
secondlast->data_len -= (rxq->crc_len -
end->data_len);
secondlast->next = NULL;
rte_pktmbuf_free_seg(end);
}
pkts[pkt_idx++] = start;
start = end = NULL;
}
} else {
/* not processing a split packet */
if (!split_flags[buf_idx]) {
/* not a split packet, save and skip */
pkts[pkt_idx++] = rx_bufs[buf_idx];
continue;
}
end = start = rx_bufs[buf_idx];
rx_bufs[buf_idx]->data_len += rxq->crc_len;
rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
}
}
/* save the partial packet for next time */
rxq->pkt_first_seg = start;
rxq->pkt_last_seg = end;
memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
return pkt_idx;
}
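/*
 * Free mbufs of the descriptors that the NIC has finished with (DD bit
 * set on the threshold descriptor). With MBUF_FAST_FREE enabled the
 * queue guarantees a single mempool and refcnt == 1, so the whole batch
 * is returned with one bulk put; otherwise each segment is pre-freed
 * and grouped by mempool before being returned.
 */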
static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
n = txq->tx_rs_thresh;
/* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < n; i++) {
free[i] = txep[i].mbuf;
txep[i].mbuf = NULL;
}
rte_mempool_put_bulk(free[0]->pool, (void **)free, n);
goto done;
}
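/*
 * Regular path: rte_pktmbuf_prefree_seg() drops the reference and
 * returns NULL if the mbuf is still referenced elsewhere. Consecutive
 * mbufs from the same mempool are batched into a single bulk put.
 */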
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool)) {
free[nb_free++] = m;
} else {
rte_mempool_put_bulk(free[0]->pool,
(void **)free,
nb_free);
free[0] = m;
nb_free = 1;
}
}
}
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
}
done:
/* buffers were freed, update counters */
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
if (txq->tx_next_dd >= txq->nb_tx_desc)
txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
return txq->tx_rs_thresh;
}
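/*
 * Record the mbufs handed to the hardware in the sw_ring so that
 * i40e_tx_free_bufs() can release them once their descriptors are done.
 */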
static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
for (i = 0; i < (int)nb_pkts; ++i)
txep[i].mbuf = tx_pkts[i];
}
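/*
 * Release every mbuf still owned by a vector Rx queue. When part of
 * the ring is already waiting to be rearmed (rxrearm_nb != 0), only
 * the window from rx_tail up to rxrearm_start holds valid mbufs.
 */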
static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
const unsigned mask = rxq->nb_rx_desc - 1;
unsigned i;
if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
return;
/* free all mbufs that are valid in the ring */
if (rxq->rxrearm_nb == 0) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
if (rxq->sw_ring[i].mbuf != NULL)
rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
}
} else {
for (i = rxq->rx_tail;
i != rxq->rxrearm_start;
i = (i + 1) & mask) {
if (rxq->sw_ring[i].mbuf != NULL)
rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
}
}
rxq->rxrearm_nb = rxq->nb_rx_desc;
/* set all entries to NULL */
memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
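/*
 * Precompute the 64-bit mbuf_initializer template: the rearm_data word
 * of a zeroed mbuf (data_off, refcnt, nb_segs, port) captured once so
 * the Rx rearm loop can reset each fresh mbuf with a single store.
 */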
static inline int
i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
mb_def.nb_segs = 1;
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
mb_def.port = rxq->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
/* prevent compiler reordering: rearm_data covers previous fields */
rte_compiler_barrier();
p = (uintptr_t)&mb_def.rearm_data;
rxq->mbuf_initializer = *(uint64_t *)p;
return 0;
}
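/*
 * Decide whether the device configuration permits the vector Rx path:
 * no flow director, no header split, no QinQ extend, and a power-of-2
 * descriptor count per queue. IEEE 1588 support forces the scalar path.
 */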
static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
struct i40e_rx_queue *rxq;
uint16_t desc, i;
bool first_queue;
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
/* no header split support */
if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
/* no QinQ support */
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
return -1;
/**
 * Vector mode is allowed only when the number of Rx queue
 * descriptors is a power of 2.
 */
if (!dev->data->dev_started) {
first_queue = true;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
desc = rxq->nb_rx_desc;
if (first_queue)
ad->rx_vec_allowed =
rte_is_power_of_2(desc);
else
ad->rx_vec_allowed = ad->rx_vec_allowed &&
rte_is_power_of_2(desc);
first_queue = false;
}
} else {
/* Only check the first queue's descriptor number */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
desc = rxq->nb_rx_desc;
ad->rx_vec_allowed = rte_is_power_of_2(desc);
break;
}
}
return 0;
#else
RTE_SET_USED(dev);
return -1;
#endif
}
#ifdef CC_AVX2_SUPPORT
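/*
 * Refill RTE_I40E_RXQ_REARM_THRESH descriptors starting at
 * rxrearm_start: bulk-allocate fresh mbufs into the sw_ring, write
 * their IOVA (plus headroom) into the descriptors with vector stores,
 * then advance the queue's tail register.
 */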
static __rte_always_inline void
i40e_rxq_rearm_common(struct i40e_rx_queue *rxq, __rte_unused bool avx512)
{
int i;
uint16_t rx_id;
volatile union i40e_rx_desc *rxdp;
struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
rxdp = rxq->rx_ring + rxq->rxrearm_start;
/* Pull 'n' more MBUFs into the software ring */
if (rte_mempool_get_bulk(rxq->mp,
(void *)rxep,
RTE_I40E_RXQ_REARM_THRESH) < 0) {
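/*
 * Allocation failed: if the ring is about to run dry, park the
 * next descriptors on the static fake_mbuf with a zeroed DMA
 * address so the NIC never writes into freed memory; account the
 * failure and retry on a later rearm.
 */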
if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
rxq->nb_rx_desc) {
__m128i dma_addr0;
dma_addr0 = _mm_setzero_si128();
for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
rxep[i].mbuf = &rxq->fake_mbuf;
_mm_store_si128((__m128i *)&rxdp[i].read,
dma_addr0);
}
}
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
RTE_I40E_RXQ_REARM_THRESH;
return;
}
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
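/*
 * Default 32-byte descriptors: only pkt_addr/hdr_addr (16 bytes) must
 * be written, one SSE store per descriptor. The packed AVX2/AVX512
 * paths below apply only when 16-byte descriptors are configured.
 */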
struct rte_mbuf *mb0, *mb1;
__m128i dma_addr0, dma_addr1;
__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
RTE_PKTMBUF_HEADROOM);
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
/* convert pa to dma_addr hdr/data */
dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
/* add headroom to pa values */
dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
/* flush desc with pa dma_addr */
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
}
#else
#ifdef CC_AVX512_SUPPORT
if (avx512) {
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
__m512i dma_addr0_3, dma_addr4_7;
__m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
/* Initialize the mbufs in vector, process 8 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
i += 8, rxep += 8, rxdp += 8) {
__m128i vaddr0, vaddr1, vaddr2, vaddr3;
__m128i vaddr4, vaddr5, vaddr6, vaddr7;
__m256i vaddr0_1, vaddr2_3;
__m256i vaddr4_5, vaddr6_7;
__m512i vaddr0_3, vaddr4_7;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
mb2 = rxep[2].mbuf;
mb3 = rxep[3].mbuf;
mb4 = rxep[4].mbuf;
mb5 = rxep[5].mbuf;
mb6 = rxep[6].mbuf;
mb7 = rxep[7].mbuf;
/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);
/**
* merge 0 & 1, by casting 0 to 256-bit and inserting 1
* into the high lanes. Similarly for 2 & 3, and so on.
*/
vaddr0_1 =
_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
vaddr1, 1);
vaddr2_3 =
_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
vaddr3, 1);
vaddr4_5 =
_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
vaddr5, 1);
vaddr6_7 =
_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
vaddr7, 1);
vaddr0_3 =
_mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
vaddr2_3, 1);
vaddr4_7 =
_mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
vaddr6_7, 1);
/* convert pa to dma_addr hdr/data */
dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, vaddr0_3);
dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, vaddr4_7);
/* add headroom to pa values */
dma_addr0_3 = _mm512_add_epi64(dma_addr0_3, hdr_room);
dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);
/* flush desc with pa dma_addr */
_mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
_mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
}
} else
#endif
{
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
__m256i dma_addr0_1, dma_addr2_3;
__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
/* Initialize the mbufs in vector, process 4 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
i += 4, rxep += 4, rxdp += 4) {
__m128i vaddr0, vaddr1, vaddr2, vaddr3;
__m256i vaddr0_1, vaddr2_3;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
mb2 = rxep[2].mbuf;
mb3 = rxep[3].mbuf;
/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
/*
* merge 0 & 1, by casting 0 to 256-bit and inserting 1
* into the high lanes. Similarly for 2 & 3
*/
vaddr0_1 = _mm256_inserti128_si256(
_mm256_castsi128_si256(vaddr0), vaddr1, 1);
vaddr2_3 = _mm256_inserti128_si256(
_mm256_castsi128_si256(vaddr2), vaddr3, 1);
/* convert pa to dma_addr hdr/data */
dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);
/* add headroom to pa values */
dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);
/* flush desc with pa dma_addr */
_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
}
}
#endif
rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
if (rxq->rxrearm_start >= rxq->nb_rx_desc)
rxq->rxrearm_start = 0;
rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
/* Update the tail pointer on the NIC */
I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
#endif
#endif