diff --git a/doc/guides/nics/features/failsafe.ini b/doc/guides/nics/features/failsafe.ini
index 712c0b7f7a..74eae4a620 100644
--- a/doc/guides/nics/features/failsafe.ini
+++ b/doc/guides/nics/features/failsafe.ini
@@ -7,7 +7,7 @@
 Link status          = Y
 Link status event    = Y
 Rx interrupt         = Y
-Queue start/stop     = P
+Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Promiscuous mode     = Y
diff --git a/doc/guides/rel_notes/release_18_11.rst b/doc/guides/rel_notes/release_18_11.rst
index 485caf6792..d4ef2efc6b 100644
--- a/doc/guides/rel_notes/release_18_11.rst
+++ b/doc/guides/rel_notes/release_18_11.rst
@@ -76,8 +76,8 @@ New Features
 
   Updated the failsafe driver including the following changes:
 
-  * Support for Rx queues start and stop.
-  * Support for Rx queues deferred start.
+  * Support for Rx and Tx queues start and stop.
+  * Support for Rx and Tx queues deferred start.
 
 * **Added ability to switch queue deferred start flag on testpmd app.**
 
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 305deed638..191f95f147 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -407,6 +407,47 @@ failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+	struct txq *txq;
+	int ret;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		if (txq->info.conf.tx_deferred_start &&
+		    dev->data->tx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STARTED) {
+			/*
+			 * The subdevice Tx queue does not launch on device
+			 * start if deferred start flag is set. It needs to be
+			 * started manually in case an appropriate failsafe Tx
+			 * queue has been started earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_start(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		} else if (dev->data->tx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STOPPED) {
+			/*
+			 * The subdevice Tx queue needs to be stopped manually
+			 * in case an appropriate failsafe Tx queue has been
+			 * stopped earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_stop(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
 int
 failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 {
@@ -466,6 +507,9 @@ failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 	if (ret)
 		goto err_remove;
 	ret = failsafe_eth_dev_rx_queues_sync(dev);
+	if (ret)
+		goto err_remove;
+	ret = failsafe_eth_dev_tx_queues_sync(dev);
 	if (ret)
 		goto err_remove;
 	return 0;
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index b3bfacb363..84f4b7ab8b 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -172,6 +172,7 @@ static void
 fs_set_queues_state_start(struct rte_eth_dev *dev)
 {
 	struct rxq *rxq;
+	struct txq *txq;
 	uint16_t i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -180,6 +181,12 @@ fs_set_queues_state_start(struct rte_eth_dev *dev)
 			dev->data->rx_queue_state[i] =
 						RTE_ETH_QUEUE_STATE_STARTED;
 	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq->info.conf.tx_deferred_start)
+			dev->data->tx_queue_state[i] =
+						RTE_ETH_QUEUE_STATE_STARTED;
+	}
 }
 
 static int
@@ -232,6 +239,8 @@ fs_set_queues_state_stop(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 }
 
 static void
@@ -371,6 +380,59 @@ fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return 0;
 }
 
+static int
+fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct sub_device *sdev;
+	uint8_t i;
+	int ret;
+	int err = 0;
+	bool failure = true;
+
+	fs_lock(dev, 0);
+	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+		uint16_t port_id = ETH(sdev)->data->port_id;
+
+		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
+		ret = fs_err(sdev, ret);
+		if (ret) {
+			ERROR("Tx queue stop failed for subdevice %d", i);
+			err = ret;
+		} else {
+			failure = false;
+		}
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	fs_unlock(dev, 0);
+	/* Return 0 in case of at least one successful queue stop */
+	return (failure) ? err : 0;
+}
+
+static int
+fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct sub_device *sdev;
+	uint8_t i;
+	int ret;
+
+	fs_lock(dev, 0);
+	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+		uint16_t port_id = ETH(sdev)->data->port_id;
+
+		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
+		ret = fs_err(sdev, ret);
+		if (ret) {
+			ERROR("Tx queue start failed for subdevice %d", i);
+			fs_tx_queue_stop(dev, tx_queue_id);
+			fs_unlock(dev, 0);
+			return ret;
+		}
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	fs_unlock(dev, 0);
+	return 0;
+}
+
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -592,12 +654,17 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	if (tx_conf->tx_deferred_start) {
-		ERROR("Tx queue deferred start is not supported");
-		return -EINVAL;
-	}
-
 	fs_lock(dev, 0);
+	if (tx_conf->tx_deferred_start) {
+		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+			if (SUBOPS(sdev, tx_queue_start) == NULL) {
+				ERROR("Tx queue deferred start is not "
+					"supported for subdevice %d", i);
+				fs_unlock(dev, 0);
+				return -EINVAL;
+			}
+		}
+	}
 	txq = dev->data->tx_queues[tx_queue_id];
 	if (txq != NULL) {
 		fs_tx_queue_release(txq);
@@ -1127,6 +1194,8 @@ const struct eth_dev_ops failsafe_ops = {
 	.vlan_filter_set = fs_vlan_filter_set,
 	.rx_queue_start = fs_rx_queue_start,
 	.rx_queue_stop = fs_rx_queue_stop,
+	.tx_queue_start = fs_tx_queue_start,
+	.tx_queue_stop = fs_tx_queue_stop,
 	.rx_queue_setup = fs_rx_queue_setup,
 	.tx_queue_setup = fs_tx_queue_setup,
 	.rx_queue_release = fs_rx_queue_release,
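---

Note (not part of the patch): the sketch below shows how an application might exercise the Tx deferred start path this patch enables, purely through the generic ethdev API. The helper name, port/queue ids, the 512-descriptor ring sizes, and the mostly zero-initialized configs are illustrative assumptions; real code would typically derive the Tx config from dev_info.default_txconf.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/*
 * Illustrative sketch: configure one Rx and one Tx queue on a port
 * (e.g. a failsafe port), mark the Tx queue as deferred so that
 * rte_eth_dev_start() leaves it stopped, then start it explicitly.
 */
static int
start_port_with_deferred_txq(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = { 0 };
	/* Assumption: a zeroed txconf with only the deferred start flag
	 * set is acceptable to the subdevices used here. */
	struct rte_eth_txconf txq_conf = {
		.tx_deferred_start = 1, /* skip this queue in dev_start */
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret < 0)
		return ret;
	/* With this patch, failsafe accepts tx_deferred_start as long as
	 * every probed subdevice implements tx_queue_start. */
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), &txq_conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;
	/* The Tx queue stays stopped until started explicitly; failsafe
	 * fans the call out to all active subdevices, and the new
	 * failsafe_eth_dev_tx_queues_sync() replays the queue state on
	 * any subdevice that is plugged in later. */
	return rte_eth_dev_tx_queue_start(port_id, 0);
}

The state replay is the point of the sync function added in failsafe_ether.c: since a failsafe subdevice can appear after the application has already started or stopped individual queues, the recorded tx_queue_state[] is pushed down to the subdevice on hotplug so its queues match what the application last requested.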