net/failsafe: add Tx queue start and stop functions

Support Tx queue deferred start.

Signed-off-by: Ian Dolzhansky <ian.dolzhansky@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Gaetan Rivet <gaetan.rivet@6wind.com>
This commit is contained in:
Ian Dolzhansky 2018-09-20 14:55:52 +01:00 committed by Ferruh Yigit
parent 3db7001e51
commit b32c9075ee
4 changed files with 121 additions and 8 deletions

View File

@ -7,7 +7,7 @@
Link status = Y
Link status event = Y
Rx interrupt = Y
Queue start/stop = P
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
Promiscuous mode = Y

View File

@ -76,8 +76,8 @@ New Features
Updated the failsafe driver including the following changes:
* Support for Rx queues start and stop.
* Support for Rx queues deferred start.
* Support for Rx and Tx queues start and stop.
* Support for Rx and Tx queues deferred start.
* **Added the ability to toggle the queue deferred start flag in the testpmd application.**

View File

@ -407,6 +407,47 @@ failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
return 0;
}
static int
failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
{
struct txq *txq;
int ret;
uint16_t i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
if (txq->info.conf.tx_deferred_start &&
dev->data->tx_queue_state[i] ==
RTE_ETH_QUEUE_STATE_STARTED) {
/*
* The subdevice Tx queue does not launch on device
* start if deferred start flag is set. It needs to be
* started manually in case an appropriate failsafe Tx
* queue has been started earlier.
*/
ret = dev->dev_ops->tx_queue_start(dev, i);
if (ret) {
ERROR("Could not synchronize Tx queue %d", i);
return ret;
}
} else if (dev->data->tx_queue_state[i] ==
RTE_ETH_QUEUE_STATE_STOPPED) {
/*
* The subdevice Tx queue needs to be stopped manually
* in case an appropriate failsafe Tx queue has been
* stopped earlier.
*/
ret = dev->dev_ops->tx_queue_stop(dev, i);
if (ret) {
ERROR("Could not synchronize Tx queue %d", i);
return ret;
}
}
}
return 0;
}
int
failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
{
@ -466,6 +507,9 @@ failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
if (ret)
goto err_remove;
ret = failsafe_eth_dev_rx_queues_sync(dev);
if (ret)
goto err_remove;
ret = failsafe_eth_dev_tx_queues_sync(dev);
if (ret)
goto err_remove;
return 0;

View File

@ -172,6 +172,7 @@ static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
struct rxq *rxq;
struct txq *txq;
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@ -180,6 +181,12 @@ fs_set_queues_state_start(struct rte_eth_dev *dev)
dev->data->rx_queue_state[i] =
RTE_ETH_QUEUE_STATE_STARTED;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
if (!txq->info.conf.tx_deferred_start)
dev->data->tx_queue_state[i] =
RTE_ETH_QUEUE_STATE_STARTED;
}
}
static int
@ -232,6 +239,8 @@ fs_set_queues_state_stop(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++)
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
for (i = 0; i < dev->data->nb_tx_queues; i++)
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
static void
@ -371,6 +380,59 @@ fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return 0;
}
static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t sub_idx;
	int last_err = 0;
	bool any_success = false;

	fs_lock(dev, 0);
	/* Ask every active subdevice to stop its copy of the queue. */
	FOREACH_SUBDEV_STATE(sdev, sub_idx, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_tx_queue_stop(ETH(sdev)->data->port_id,
						   tx_queue_id);

		rc = fs_err(sdev, rc);
		if (rc == 0) {
			any_success = true;
		} else {
			ERROR("Tx queue stop failed for subdevice %d",
			      sub_idx);
			last_err = rc;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Report success when at least one subdevice stopped the queue. */
	return any_success ? 0 : last_err;
}
static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t sub_idx;

	fs_lock(dev, 0);
	/* Start the queue on every active subdevice; all must succeed. */
	FOREACH_SUBDEV_STATE(sdev, sub_idx, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_tx_queue_start(ETH(sdev)->data->port_id,
						    tx_queue_id);

		rc = fs_err(sdev, rc);
		if (rc != 0) {
			ERROR("Tx queue start failed for subdevice %d",
			      sub_idx);
			/* Roll back queues started on earlier subdevices. */
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return rc;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}
static void
fs_rx_queue_release(void *queue)
{
@ -592,12 +654,17 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
uint8_t i;
int ret;
if (tx_conf->tx_deferred_start) {
ERROR("Tx queue deferred start is not supported");
return -EINVAL;
}
fs_lock(dev, 0);
if (tx_conf->tx_deferred_start) {
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
if (SUBOPS(sdev, tx_queue_start) == NULL) {
ERROR("Tx queue deferred start is not "
"supported for subdevice %d", i);
fs_unlock(dev, 0);
return -EINVAL;
}
}
}
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
fs_tx_queue_release(txq);
@ -1127,6 +1194,8 @@ const struct eth_dev_ops failsafe_ops = {
.vlan_filter_set = fs_vlan_filter_set,
.rx_queue_start = fs_rx_queue_start,
.rx_queue_stop = fs_rx_queue_stop,
.tx_queue_start = fs_tx_queue_start,
.tx_queue_stop = fs_tx_queue_stop,
.rx_queue_setup = fs_rx_queue_setup,
.tx_queue_setup = fs_tx_queue_setup,
.rx_queue_release = fs_rx_queue_release,