net/sfc: support deferred start of transmit queues

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Andrew Lee <alee@solarflare.com>
Reviewed-by: Robert Stonehouse <rstonehouse@solarflare.com>
Commit: c6a1d9b5ab (parent: 21f6411c89)
Author: Ivan Malov
Date: 2016-12-15 12:51:14 +00:00
Committer: Ferruh Yigit

5 files changed, 65 insertions(+), 10 deletions(-)

--- a/doc/guides/nics/features/sfc_efx.ini
+++ b/doc/guides/nics/features/sfc_efx.ini

@@ -7,7 +7,7 @@
 Speed capabilities = Y
 Link status = Y
 Link status event = Y
-Queue start/stop = P
+Queue start/stop = Y
 MTU update = Y
 Jumbo frame = Y
 Scattered Rx = Y

--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst

@@ -71,7 +71,7 @@ SFC EFX PMD has support for:
 
 - Scattered Rx DMA for packet that are larger that a single Rx descriptor
 
-- Deferred receive queue start
+- Deferred receive and transmit queue start
 
 Non-supported Features
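
For context, deferred start is driven entirely through the generic ethdev API; the PMD only has to honour the flag. A minimal application-side sketch, assuming the port has already been configured with rte_eth_dev_configure() and at least one Tx queue; the queue index and descriptor count are arbitrary placeholders:

    #include <string.h>

    #include <rte_ethdev.h>
    #include <rte_lcore.h>

    /* Hypothetical helper: set up Tx queue 0 so that rte_eth_dev_start()
     * leaves it stopped, then start it explicitly.
     */
    static int
    use_deferred_txq(uint16_t port_id)
    {
        struct rte_eth_txconf txconf;
        int rc;

        /* Request deferred start at queue setup time. */
        memset(&txconf, 0, sizeof(txconf));
        txconf.tx_deferred_start = 1;
        rc = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                    &txconf);
        if (rc != 0)
            return rc;

        /* The device starts, but Tx queue 0 stays stopped... */
        rc = rte_eth_dev_start(port_id);
        if (rc != 0)
            return rc;

        /* ...until started explicitly; rte_eth_dev_tx_queue_stop()
         * stops it again at runtime.
         */
        return rte_eth_dev_tx_queue_start(port_id, 0);
    }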

--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c

@@ -864,6 +864,7 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 
 	qinfo->conf.txq_flags = txq_info->txq->flags;
 	qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
+	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
 	qinfo->nb_desc = txq_info->entries;
 
 	sfc_adapter_unlock(sa);
@@ -935,6 +936,54 @@ sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return 0;
 }
 
+static int
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct sfc_adapter *sa = dev->data->dev_private;
+	int rc;
+
+	sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+	sfc_adapter_lock(sa);
+
+	rc = EINVAL;
+	if (sa->state != SFC_ADAPTER_STARTED)
+		goto fail_not_started;
+
+	rc = sfc_tx_qstart(sa, tx_queue_id);
+	if (rc != 0)
+		goto fail_tx_qstart;
+
+	sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
+
+	sfc_adapter_unlock(sa);
+	return 0;
+
+fail_tx_qstart:
+
+fail_not_started:
+	sfc_adapter_unlock(sa);
+	SFC_ASSERT(rc > 0);
+	return -rc;
+}
+
+static int
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct sfc_adapter *sa = dev->data->dev_private;
+
+	sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+	sfc_adapter_lock(sa);
+	sfc_tx_qstop(sa, tx_queue_id);
+
+	sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+}
+
 static const struct eth_dev_ops sfc_eth_dev_ops = {
 	.dev_configure = sfc_dev_configure,
 	.dev_start = sfc_dev_start,
@@ -955,6 +1004,8 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
 	.mtu_set = sfc_dev_set_mtu,
 	.rx_queue_start = sfc_rx_queue_start,
 	.rx_queue_stop = sfc_rx_queue_stop,
+	.tx_queue_start = sfc_tx_queue_start,
+	.tx_queue_stop = sfc_tx_queue_stop,
 	.rx_queue_setup = sfc_rx_queue_setup,
 	.rx_queue_release = sfc_rx_queue_release,
 	.rx_queue_count = sfc_rx_queue_count,
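
Note the error-handling convention in sfc_tx_queue_start() above: rc holds a positive errno value while the function works, both failure labels fall through to a single unlock, and the value is negated on return because ethdev expects negative errno from these callbacks. Distilled into a standalone sketch (the function and parameter names here are illustrative, not from the driver):

    #include <errno.h>

    /* Skeleton of the positive-errno / negate-on-return pattern. */
    static int
    example_queue_op(int adapter_started)
    {
        int rc;

        rc = EINVAL;            /* keep errno positive internally */
        if (!adapter_started)
            goto fail_not_started;

        return 0;               /* success path returns 0 */

    fail_not_started:
        return -rc;             /* callbacks return negative errno */
    }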

--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c

@@ -72,11 +72,6 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
 		rc = EINVAL;
 	}
 
-	if (tx_conf->tx_deferred_start != 0) {
-		sfc_err(sa, "TX queue deferred start is not supported (yet)");
-		rc = EINVAL;
-	}
-
 	if (tx_conf->tx_thresh.pthresh != 0 ||
 	    tx_conf->tx_thresh.hthresh != 0 ||
 	    tx_conf->tx_thresh.wthresh != 0) {
@@ -198,6 +193,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	evq->txq = txq;
 
 	txq_info->txq = txq;
+	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
 
 	return 0;
 
@@ -425,6 +421,9 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 
 	txq = txq_info->txq;
 
+	if (txq->state == SFC_TXQ_INITIALIZED)
+		return;
+
 	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
 
 	txq->state &= ~SFC_TXQ_RUNNING;
@@ -497,9 +496,12 @@ sfc_tx_start(struct sfc_adapter *sa)
 		goto fail_efx_tx_init;
 
 	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
-		rc = sfc_tx_qstart(sa, sw_index);
-		if (rc != 0)
-			goto fail_tx_qstart;
+		if (!(sa->txq_info[sw_index].deferred_start) ||
+		    sa->txq_info[sw_index].deferred_started) {
+			rc = sfc_tx_qstart(sa, sw_index);
+			if (rc != 0)
+				goto fail_tx_qstart;
+		}
 	}
 
 	return 0;
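
The reworked loop in sfc_tx_start() encodes the restart semantics of the feature: when the whole port is (re)started, a queue is started automatically unless it is marked for deferred start and the application has never started it explicitly, so a deferred queue that was running before a port restart comes back up. The condition restated as a hypothetical standalone predicate:

    #include <stdbool.h>

    /* Auto-start a queue on port start unless its start is deferred
     * and the application has not yet started it explicitly.
     */
    static bool
    txq_should_autostart(bool deferred_start, bool deferred_started)
    {
        return !deferred_start || deferred_started;
    }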

--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h

@@ -91,6 +91,8 @@ sfc_txq_sw_index(const struct sfc_txq *txq)
 struct sfc_txq_info {
 	unsigned int entries;
 	struct sfc_txq *txq;
+	boolean_t deferred_start;
+	boolean_t deferred_started;
 };
 
 int sfc_tx_init(struct sfc_adapter *sa);
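
The two new flags are easy to confuse: deferred_start records what was requested via tx_conf at queue setup time, while deferred_started tracks whether the application has actually started the queue. An annotated copy of the structure for clarity (the comments are editorial, not in the source):

    struct sfc_txq_info {
        unsigned int entries;        /* Tx ring size in descriptors */
        struct sfc_txq *txq;
        boolean_t deferred_start;    /* requested at queue setup time */
        boolean_t deferred_started;  /* explicitly started by the app */
    };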