net/sfc: free mbufs in bulks on simple EF10 Tx reap

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Authored by Ivan Malov on 2017-09-13 10:33:34 +01:00; committed by Ferruh Yigit
parent d321954343
commit 093e1afa24
5 changed files with 44 additions and 2 deletions

doc/guides/nics/sfc_efx.rst

@@ -245,12 +245,16 @@ boolean parameters value.
   features available and required by the datapath implementation.
   **efx** chooses libefx-based datapath which supports VLAN insertion
   (full-feature firmware variant only), TSO and multi-segment mbufs.
+  Mbuf segments may come from different mempools, and mbuf reference
+  counters are treated responsibly.
   **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
   more efficient than libefx-based but has no VLAN insertion and TSO
   support yet.
+  Mbuf segments may come from different mempools, and mbuf reference
+  counters are treated responsibly.
   **ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which
   is even faster than **ef10** but does not support multi-segment
-  mbufs.
+  mbufs, disallows multiple mempools and neglects mbuf reference counters.

 - ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
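As background for the documentation change above: the Tx datapath is chosen per device through the ``tx_datapath`` device argument at probe time. The sketch below shows one way to pass it from application code with the EAL of this era (whitelist option with appended key=value arguments); the PCI address, application name and argument handling are placeholders, not taken from the patch.

/*
 * Sketch only (not part of the patch): request the ef10_simple Tx datapath
 * through a device argument when initialising the EAL.  The PCI address
 * 0000:04:00.0 is a placeholder for a real SFN7xxx/SFN8xxx adapter.
 */
#include <rte_eal.h>
#include <rte_common.h>

int
main(void)
{
	char *eal_args[] = {
		"sfc_app",
		"-w", "0000:04:00.0,tx_datapath=ef10_simple",
	};

	if (rte_eal_init(RTE_DIM(eal_args), eal_args) < 0)
		return -1;

	/* ... configure the port and set up Rx/Tx queues as usual ... */
	return 0;
}

With ef10_simple selected, the queue-setup checks added later in this patch expect the application to keep ETH_TXQ_FLAGS_NOMULTMEMP and ETH_TXQ_FLAGS_NOREFCOUNT set.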

drivers/net/sfc/sfc_dp_tx.h

@@ -142,6 +142,8 @@ struct sfc_dp_tx {
 #define SFC_DP_TX_FEAT_TSO		0x2
 #define SFC_DP_TX_FEAT_MULTI_SEG	0x4
 #define SFC_DP_TX_FEAT_MULTI_PROCESS	0x8
+#define SFC_DP_TX_FEAT_MULTI_POOL	0x10
+#define SFC_DP_TX_FEAT_REFCNT		0x20
 	sfc_dp_tx_qcreate_t		*qcreate;
 	sfc_dp_tx_qdestroy_t		*qdestroy;
 	sfc_dp_tx_qstart_t		*qstart;
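The two new bits extend the generic feature mask in struct sfc_dp_tx, so datapath-independent code can reason about mempool and reference-counter requirements uniformly. As an illustration only (the helper below is hypothetical and not part of the driver), the invariant exploited by the bulk-free reap can be written as:

/*
 * Hypothetical helper, shown for illustration; it does not exist in the
 * driver.  Bulk freeing is legitimate only when the datapath neither
 * honours mbuf reference counters (no SFC_DP_TX_FEAT_REFCNT) nor accepts
 * mbufs from multiple mempools (no SFC_DP_TX_FEAT_MULTI_POOL).
 * Assumes the declarations from sfc_dp_tx.h and <stdbool.h>.
 */
static inline bool
sfc_dp_tx_bulk_free_allowed(const struct sfc_dp_tx *dp_tx)
{
	return (dp_tx->features &
		(SFC_DP_TX_FEAT_REFCNT | SFC_DP_TX_FEAT_MULTI_POOL)) == 0;
}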

drivers/net/sfc/sfc_ef10_tx.c

@@ -401,14 +401,25 @@ sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
 	pending += sfc_ef10_tx_process_events(txq);

 	if (pending != completed) {
+		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+		unsigned int nb = 0;
+
 		do {
 			struct sfc_ef10_tx_sw_desc *txd;

 			txd = &txq->sw_ring[completed & ptr_mask];

-			rte_pktmbuf_free_seg(txd->mbuf);
+			if (nb == RTE_DIM(bulk)) {
+				rte_mempool_put_bulk(bulk[0]->pool,
+						     (void *)bulk, nb);
+				nb = 0;
+			}
+
+			bulk[nb++] = txd->mbuf;
 		} while (++completed != pending);

+		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
 		txq->completed = completed;
 	}
@@ -614,6 +625,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_SEG |
+				  SFC_DP_TX_FEAT_MULTI_POOL |
+				  SFC_DP_TX_FEAT_REFCNT |
 				  SFC_DP_TX_FEAT_MULTI_PROCESS,
 	.qcreate		= sfc_ef10_tx_qcreate,
 	.qdestroy		= sfc_ef10_tx_qdestroy,
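The first hunk above is the core of the patch: completed mbufs are no longer released one at a time with rte_pktmbuf_free_seg() but are batched and returned with rte_mempool_put_bulk(), which skips per-mbuf reference-count handling entirely. The standalone sketch below restates the pattern over a simplified ring of plain mbuf pointers (the driver uses sw_ring entries of struct sfc_ef10_tx_sw_desc); the function and macro names are illustrative, and the code is valid only under the ef10_simple assumptions: direct mbufs with a reference count of 1, all coming from a single mempool.

/*
 * Standalone sketch of the bulk-free pattern; not the driver code itself.
 * REAP_BULK_SIZE mirrors the driver's SFC_TX_REAP_BULK_SIZE.
 */
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define REAP_BULK_SIZE 32

static void
reap_bulk_free(struct rte_mbuf **ring, unsigned int completed,
	       unsigned int pending, unsigned int ptr_mask)
{
	struct rte_mbuf *bulk[REAP_BULK_SIZE];
	unsigned int nb = 0;

	while (completed != pending) {
		struct rte_mbuf *m = ring[completed++ & ptr_mask];

		/* Flush a full batch back to the (single) mempool */
		if (nb == RTE_DIM(bulk)) {
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
			nb = 0;
		}

		bulk[nb++] = m;
	}

	/* Return whatever is left in the batch */
	if (nb != 0)
		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
}

Flushing before appending guarantees the batch is never empty when it is flushed from inside the loop; the driver's do/while additionally knows that pending != completed, so its single trailing rte_mempool_put_bulk() call always sees nb >= 1.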

drivers/net/sfc/sfc_ethdev.c

@@ -145,6 +145,12 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
 		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;

+	if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)
+		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP;
+
+	if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)
+		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+
 #if EFSYS_OPT_RX_SCALE
 	if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
 		dev_info->reta_size = EFX_RSS_TBL_SIZE;
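From the application's point of view, these defaults take effect when queue configuration starts from the PMD-reported struct rte_eth_dev_info. A sketch using the txq_flags-based ethdev API of this era follows; the function name, port/queue identifiers and descriptor count are placeholders.

/*
 * Sketch: build the Tx queue configuration from the PMD defaults so the
 * ETH_TXQ_FLAGS_NOMULTMEMP / ETH_TXQ_FLAGS_NOREFCOUNT bits filled in by
 * sfc_dev_infos_get() are honoured.  Identifiers and sizes are placeholders.
 */
#include <rte_ethdev.h>

static int
setup_tx_queue(uint16_t port_id, uint16_t queue_id, unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;

	return rte_eth_tx_queue_setup(port_id, queue_id, 512, socket_id,
				      &txconf);
}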

drivers/net/sfc/sfc_tx.c

@@ -91,6 +91,21 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
 		rc = EINVAL;
 	}

+	if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) &&
+	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) {
+		sfc_err(sa, "multi-mempool is not supported by %s datapath",
+			sa->dp_tx->dp.name);
+		rc = EINVAL;
+	}
+
+	if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) &&
+	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) {
+		sfc_err(sa,
+			"mbuf reference counters are neglected by %s datapath",
+			sa->dp_tx->dp.name);
+		rc = EINVAL;
+	}
+
 	if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
 		if (!encp->enc_hw_tx_insert_vlan_enabled) {
 			sfc_err(sa, "VLAN offload is not supported");
@@ -1023,6 +1038,8 @@ struct sfc_dp_tx sfc_efx_tx = {
 	},
 	.features		= SFC_DP_TX_FEAT_VLAN_INSERT |
 				  SFC_DP_TX_FEAT_TSO |
+				  SFC_DP_TX_FEAT_MULTI_POOL |
+				  SFC_DP_TX_FEAT_REFCNT |
 				  SFC_DP_TX_FEAT_MULTI_SEG,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
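Conversely, an application that clears ETH_TXQ_FLAGS_NOMULTMEMP or ETH_TXQ_FLAGS_NOREFCOUNT declares that it depends on multiple mempools or on mbuf reference counting, and the checks added to sfc_tx_qcheck_conf() reject that combination on a datapath which does not advertise the corresponding feature. A sketch of the failing case on the ef10_simple datapath is shown below; the function name, port/queue identifiers and descriptor count are placeholders.

/*
 * Sketch: with ef10_simple selected (no SFC_DP_TX_FEAT_MULTI_POOL, no
 * SFC_DP_TX_FEAT_REFCNT), clearing the corresponding txq_flags makes
 * sfc_tx_qcheck_conf() set rc = EINVAL, so this queue setup fails.
 * Identifiers and sizes are placeholders.
 */
#include <rte_ethdev.h>

static int
try_refcount_dependent_txq(uint16_t port_id, unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;

	/* Declare a dependency on refcounts and multiple mempools */
	txconf.txq_flags &= ~(ETH_TXQ_FLAGS_NOREFCOUNT |
			      ETH_TXQ_FLAGS_NOMULTMEMP);

	return rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, &txconf);
}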