net/sfc: support more options for a number of Tx descriptors

The number of Tx descriptors is no longer used as the HW Tx ring size.
It simply defines the maximum fill level.

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Andy Moreton <amoreton@solarflare.com>
This commit is contained in:
Andrew Rybchenko 2018-01-09 20:24:55 +00:00 committed by Ferruh Yigit
parent 3c335b7f5e
commit c7dadc9fb5
4 changed files with 41 additions and 4 deletions

View File

@@ -79,6 +79,13 @@ struct sfc_dp_tx_qcreate_info {
volatile void *mem_bar;
};
/**
* Get Tx datapath specific device info.
*
* @param dev_info Device info to be adjusted
*/
typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
/**
* Get size of transmit and event queue rings by the number of Tx
* descriptors.
@@ -162,6 +169,7 @@ struct sfc_dp_tx {
#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
#define SFC_DP_TX_FEAT_MULTI_POOL 0x10
#define SFC_DP_TX_FEAT_REFCNT 0x20
sfc_dp_tx_get_dev_info_t *get_dev_info;
sfc_dp_tx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_tx_qcreate_t *qcreate;
sfc_dp_tx_qdestroy_t *qdestroy;

View File

@@ -481,6 +481,17 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return pktp - &tx_pkts[0];
}
static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
static void
sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * On EF10 the Tx descriptor count only bounds the fill level and
	 * no longer sets the HW ring size, so relax the minimum and the
	 * alignment requirement down to a single descriptor.
	 */
	dev_info->tx_desc_lim.nb_align = 1;
	dev_info->tx_desc_lim.nb_min = 1;
}
static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
static int
@@ -489,9 +500,19 @@ sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
unsigned int *evq_entries,
unsigned int *txq_max_fill_level)
{
*txq_entries = nb_tx_desc;
*evq_entries = nb_tx_desc;
*txq_max_fill_level = SFC_EF10_TXQ_LIMIT(*txq_entries);
/*
* rte_ethdev API guarantees that the number meets min, max and
* alignment requirements.
*/
if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
*txq_entries = EFX_TXQ_MINNDESCS;
else
*txq_entries = rte_align32pow2(nb_tx_desc);
*evq_entries = *txq_entries;
*txq_max_fill_level = RTE_MIN(nb_tx_desc,
SFC_EF10_TXQ_LIMIT(*evq_entries));
return 0;
}
@@ -637,6 +658,7 @@ struct sfc_dp_tx sfc_ef10_tx = {
SFC_DP_TX_FEAT_MULTI_POOL |
SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_PROCESS,
.get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
@@ -654,6 +676,7 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
.type = SFC_DP_TX,
},
.features = SFC_DP_TX_FEAT_MULTI_PROCESS,
.get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,

View File

@@ -178,6 +178,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
*/
dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
/* Initialize to hardware limits */
dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
/*
@@ -188,6 +189,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (sa->dp_rx->get_dev_info != NULL)
sa->dp_rx->get_dev_info(dev_info);
if (sa->dp_tx->get_dev_info != NULL)
sa->dp_tx->get_dev_info(dev_info);
}
static const uint32_t *

View File

@@ -160,6 +160,10 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
&txq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
SFC_ASSERT(txq_entries <= sa->txq_max_entries);
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
if (rc != 0)
@@ -168,7 +172,6 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(sw_index < sa->txq_count);
txq_info = &sa->txq_info[sw_index];
SFC_ASSERT(txq_entries <= sa->txq_max_entries);
txq_info->entries = txq_entries;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,