net/nfp: add queue stop and close helper functions

This commit does not introduce new features; it only moves common
logic into helper functions to remove duplicated code and increase
code reuse. The helpers cover the queue stop and queue close logic
used when the NFP net device is stopped and closed.

queue stop: reset the queue
queue close: reset and release the queue

Modify the NFP net stop and close functions to stop and close their
queues through these helper functions instead of the previous
open-coded per-queue loops.
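
As a quick illustration of the stop/close split, the minimal,
self-contained sketch below only mirrors the shape of the new helpers;
the toy_* names are hypothetical stand-ins, not the real NFP structures
(rte_eth_dev, nfp_net_rxq, nfp_net_txq):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for a device and its queues. */
struct toy_queue {
        unsigned int *ring;   /* descriptor ring (heap allocated) */
        unsigned int rd, wr;  /* software read/write indexes */
};

struct toy_dev {
        struct toy_queue *queues[4];
        unsigned int nb_queues;
};

/* "queue stop": reset software state only; resources stay allocated. */
static void toy_queue_reset(struct toy_queue *q)
{
        q->rd = 0;
        q->wr = 0;
}

/* Stop helper: reset every queue so the port can be started again. */
static void toy_dev_stop_queues(struct toy_dev *dev)
{
        unsigned int i;

        for (i = 0; i < dev->nb_queues; i++)
                toy_queue_reset(dev->queues[i]);
}

/* Close helper: reset every queue and then release its resources. */
static void toy_dev_close_queues(struct toy_dev *dev)
{
        unsigned int i;

        for (i = 0; i < dev->nb_queues; i++) {
                toy_queue_reset(dev->queues[i]);
                free(dev->queues[i]->ring);
                free(dev->queues[i]);
                dev->queues[i] = NULL;
        }
}

int main(void)
{
        struct toy_dev dev = { .nb_queues = 2 };
        unsigned int i;

        for (i = 0; i < dev.nb_queues; i++) {
                dev.queues[i] = calloc(1, sizeof(*dev.queues[i]));
                dev.queues[i]->ring = calloc(64, sizeof(unsigned int));
                dev.queues[i]->wr = 7;
        }

        toy_dev_stop_queues(&dev);   /* stop path: indexes cleared, rings kept */
        printf("after stop:  q0 wr=%u\n", dev.queues[0]->wr);

        toy_dev_close_queues(&dev);  /* close path: queues released */
        printf("after close: q0=%p\n", (void *)dev.queues[0]);
        return 0;
}

Keeping the reset logic in one place and layering the release step on
top of it is the same split the nfp_net_stop_*_queue() and
nfp_net_close_*_queue() helpers make in the diff below.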

Signed-off-by: Jin Liu <jin.liu@corigine.com>
Signed-off-by: Diana Wang <na.wang@corigine.com>
Signed-off-by: Peng Zhang <peng.zhang@corigine.com>
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Signed-off-by: Niklas Söderlund <niklas.soderlund@corigine.com>
Jin Liu 2022-06-17 11:34:41 +02:00 committed by Ferruh Yigit
parent 52ddc4c2b6
commit 1c8d02bb69
4 changed files with 62 additions and 48 deletions

drivers/net/nfp/nfp_common.c

@@ -1320,6 +1320,56 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev)
         return ret;
 }

+void
+nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
+{
+        uint16_t i;
+        struct nfp_net_rxq *this_rx_q;
+
+        for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
+                nfp_net_reset_rx_queue(this_rx_q);
+        }
+}
+
+void
+nfp_net_close_rx_queue(struct rte_eth_dev *dev)
+{
+        uint16_t i;
+        struct nfp_net_rxq *this_rx_q;
+
+        for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
+                nfp_net_reset_rx_queue(this_rx_q);
+                nfp_net_rx_queue_release(dev, i);
+        }
+}
+
+void
+nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
+{
+        uint16_t i;
+        struct nfp_net_txq *this_tx_q;
+
+        for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
+                nfp_net_reset_tx_queue(this_tx_q);
+        }
+}
+
+void
+nfp_net_close_tx_queue(struct rte_eth_dev *dev)
+{
+        uint16_t i;
+        struct nfp_net_txq *this_tx_q;
+
+        for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
+                nfp_net_reset_tx_queue(this_tx_q);
+                nfp_net_tx_queue_release(dev, i);
+        }
+}
+
 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE);
 /*

drivers/net/nfp/nfp_common.h

@@ -413,6 +413,10 @@ int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 int nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
                 struct rte_eth_rss_conf *rss_conf);
 int nfp_net_rss_config_default(struct rte_eth_dev *dev);
+void nfp_net_stop_rx_queue(struct rte_eth_dev *dev);
+void nfp_net_close_rx_queue(struct rte_eth_dev *dev);
+void nfp_net_stop_tx_queue(struct rte_eth_dev *dev);
+void nfp_net_close_tx_queue(struct rte_eth_dev *dev);

 #define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\
         (&((struct nfp_net_adapter *)adapter)->hw)

drivers/net/nfp/nfp_ethdev.c

@@ -181,10 +181,7 @@ error:
 static int
 nfp_net_stop(struct rte_eth_dev *dev)
 {
-        int i;
         struct nfp_net_hw *hw;
-        struct nfp_net_txq *this_tx_q;
-        struct nfp_net_rxq *this_rx_q;

         PMD_INIT_LOG(DEBUG, "Stop");
@@ -193,15 +190,9 @@ nfp_net_stop(struct rte_eth_dev *dev)
         nfp_net_disable_queues(dev);

         /* Clear queues */
-        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
-                nfp_net_reset_tx_queue(this_tx_q);
-        }
+        nfp_net_stop_tx_queue(dev);

-        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
-                nfp_net_reset_rx_queue(this_rx_q);
-        }
+        nfp_net_stop_rx_queue(dev);

         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                 /* Configure the physical port down */
@@ -256,8 +247,6 @@ nfp_net_close(struct rte_eth_dev *dev)
         struct nfp_net_hw *hw;
         struct rte_pci_device *pci_dev;
         struct nfp_pf_dev *pf_dev;
-        struct nfp_net_txq *this_tx_q;
-        struct nfp_net_rxq *this_rx_q;
         int i;

         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -277,17 +266,9 @@ nfp_net_close(struct rte_eth_dev *dev)
         nfp_net_disable_queues(dev);

         /* Clear queues */
-        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
-                nfp_net_reset_tx_queue(this_tx_q);
-                nfp_net_tx_queue_release(dev, i);
-        }
+        nfp_net_close_tx_queue(dev);

-        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
-                nfp_net_reset_rx_queue(this_rx_q);
-                nfp_net_rx_queue_release(dev, i);
-        }
+        nfp_net_close_rx_queue(dev);

         /* Cancel possible impending LSC work here before releasing the port*/
         rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,

drivers/net/nfp/nfp_ethdev_vf.c

@@ -145,24 +145,14 @@ error:
 static int
 nfp_netvf_stop(struct rte_eth_dev *dev)
 {
-        struct nfp_net_txq *this_tx_q;
-        struct nfp_net_rxq *this_rx_q;
-        int i;
-
         PMD_INIT_LOG(DEBUG, "Stop");

         nfp_net_disable_queues(dev);

         /* Clear queues */
-        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
-                nfp_net_reset_tx_queue(this_tx_q);
-        }
+        nfp_net_stop_tx_queue(dev);

-        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
-                nfp_net_reset_rx_queue(this_rx_q);
-        }
+        nfp_net_stop_rx_queue(dev);

         return 0;
 }
@@ -185,9 +175,6 @@ static int
 nfp_netvf_close(struct rte_eth_dev *dev)
 {
         struct rte_pci_device *pci_dev;
-        struct nfp_net_txq *this_tx_q;
-        struct nfp_net_rxq *this_rx_q;
-        int i;

         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                 return 0;
@@ -204,17 +191,9 @@ nfp_netvf_close(struct rte_eth_dev *dev)
         nfp_net_disable_queues(dev);

         /* Clear queues */
-        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
-                nfp_net_reset_tx_queue(this_tx_q);
-                nfp_net_tx_queue_release(dev, i);
-        }
+        nfp_net_close_tx_queue(dev);

-        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
-                nfp_net_reset_rx_queue(this_rx_q);
-                nfp_net_rx_queue_release(dev, i);
-        }
+        nfp_net_close_rx_queue(dev);

         rte_intr_disable(pci_dev->intr_handle);