net/cnxk: support queue start and stop

Add Rx/Tx queue start and stop callbacks for
CN9K and CN10K.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Nithin Dabilpuram, 2021-06-23 10:16:18 +05:30, committed by Jerin Jacob
parent 7ee79e83fd
commit 06d7544052
7 changed files with 128 additions and 0 deletions
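
For context (not part of the diff): a minimal application-side sketch of how the new callbacks are exercised through the public ethdev API. Queue 0 is set up with deferred start, so rte_eth_dev_start() leaves it stopped, and it is then started and stopped explicitly at runtime. The port id, descriptor count and mbuf pool "mp" are assumptions of this example, and error handling is abbreviated.

#include <rte_ethdev.h>

/* Sketch only: exercise runtime queue start/stop on queue 0 of port_id.
 * "mp" is an existing mbuf pool created by the caller. */
static int
queue_start_stop_example(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_dev_info info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	int rc;

	rc = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (rc != 0)
		return rc;
	rc = rte_eth_dev_info_get(port_id, &info);
	if (rc != 0)
		return rc;

	/* Deferred start: rte_eth_dev_start() leaves these queues in
	 * RTE_ETH_QUEUE_STATE_STOPPED. */
	rxq_conf = info.default_rxconf;
	rxq_conf.rx_deferred_start = 1;
	txq_conf = info.default_txconf;
	txq_conf.tx_deferred_start = 1;

	rc = rte_eth_rx_queue_setup(port_id, 0, 1024,
				    rte_eth_dev_socket_id(port_id),
				    &rxq_conf, mp);
	if (rc != 0)
		return rc;
	rc = rte_eth_tx_queue_setup(port_id, 0, 1024,
				    rte_eth_dev_socket_id(port_id),
				    &txq_conf);
	if (rc != 0)
		return rc;

	rc = rte_eth_dev_start(port_id);
	if (rc != 0)
		return rc;

	/* Runtime start: dispatched to the PMD rx/tx_queue_start callbacks. */
	rc = rte_eth_dev_rx_queue_start(port_id, 0);
	if (rc == 0)
		rc = rte_eth_dev_tx_queue_start(port_id, 0);
	if (rc != 0)
		return rc;

	/* ... run traffic ... */

	/* Runtime stop: dispatched to rx_queue_stop and the per-SoC
	 * cn9k/cn10k tx_queue_stop wrappers below. */
	rte_eth_dev_tx_queue_stop(port_id, 0);
	rte_eth_dev_rx_queue_stop(port_id, 0);
	return 0;
}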


@@ -12,6 +12,7 @@ Link status = Y
 Link status event = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Queue start/stop = Y
 RSS hash = Y
 Inner RSS = Y
 Packet type parsing = Y


@@ -12,6 +12,7 @@ Link status = Y
 Link status event = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Queue start/stop = Y
 RSS hash = Y
 Inner RSS = Y
 Packet type parsing = Y


@@ -11,6 +11,7 @@ Link status = Y
 Link status event = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Queue start/stop = Y
 RSS hash = Y
 Inner RSS = Y
 Packet type parsing = Y


@@ -137,6 +137,21 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	return 0;
 }
 
+static int
+cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+	struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+	int rc;
+
+	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
+	if (rc)
+		return rc;
+
+	/* Clear fc cache pkts to trigger worker stop */
+	txq->fc_cache_pkts = 0;
+	return 0;
+}
+
 static int
 cn10k_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -169,6 +184,7 @@ nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
 	cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
 	cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
+	cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
 	cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
 }
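
A note on the fc_cache_pkts clearing above (mirrored for CN9K below): the wrapper first calls the common cnxk_nix_tx_queue_stop(), which disables the SQB aura flow control, and then zeroes the per-queue cached credit count so the fast-path Tx worker stops treating stale credits as available. The following is an illustrative sketch of that general idea only; apart from the fc_cache_pkts name, the struct layout, the fc_mem semantics and the refresh logic are assumptions, not the actual cn10k burst code.

#include <stdint.h>

/* Illustrative only: a simplified Tx fast path consuming a cached
 * flow-control credit counter named after the driver's fc_cache_pkts.
 * "fc_mem" and its meaning are assumptions of this sketch. */
struct example_txq {
	int64_t fc_cache_pkts;	/* credits cached from the last HW read */
	int64_t *fc_mem;	/* HW-updated credit count (assumed meaning) */
};

static inline uint16_t
example_tx_credits(struct example_txq *txq, uint16_t nb_pkts)
{
	/* Common case: enough cached credits, no read of fc_mem needed. */
	if (txq->fc_cache_pkts >= nb_pkts) {
		txq->fc_cache_pkts -= nb_pkts;
		return nb_pkts;
	}

	/* Refresh the cache from the HW-backed location. Once
	 * tx_queue_stop() has zeroed fc_cache_pkts and the aura flow
	 * control is disabled, no fresh credits appear here, so the
	 * worker naturally stops enqueuing. */
	txq->fc_cache_pkts = __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
	if (txq->fc_cache_pkts < (int64_t)nb_pkts)
		nb_pkts = txq->fc_cache_pkts > 0 ?
			  (uint16_t)txq->fc_cache_pkts : 0;
	txq->fc_cache_pkts -= nb_pkts;
	return nb_pkts;
}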


@@ -135,6 +135,21 @@ cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	return 0;
 }
 
+static int
+cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+	struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+	int rc;
+
+	rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
+	if (rc)
+		return rc;
+
+	/* Clear fc cache pkts to trigger worker stop */
+	txq->fc_cache_pkts = 0;
+	return 0;
+}
+
 static int
 cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -178,6 +193,7 @@ nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
 	cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
 	cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
+	cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
 	cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
 }


@@ -866,12 +866,104 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	return rc;
 }
 
+static int
+cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct roc_nix_sq *sq = &dev->sqs[qid];
+	int rc = -EINVAL;
+
+	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+		return 0;
+
+	rc = roc_nix_tm_sq_aura_fc(sq, true);
+	if (rc) {
+		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+		goto done;
+	}
+
+	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+	return rc;
+}
+
+int
+cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct roc_nix_sq *sq = &dev->sqs[qid];
+	int rc;
+
+	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+		return 0;
+
+	rc = roc_nix_tm_sq_aura_fc(sq, false);
+	if (rc) {
+		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
+			rc);
+		goto done;
+	}
+
+	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
done:
+	return rc;
+}
+
+static int
+cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct roc_nix_rq *rq = &dev->rqs[qid];
+	int rc;
+
+	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+		return 0;
+
+	rc = roc_nix_rq_ena_dis(rq, true);
+	if (rc) {
+		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+		goto done;
+	}
+
+	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+	return rc;
+}
+
+static int
+cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct roc_nix_rq *rq = &dev->rqs[qid];
+	int rc;
+
+	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+		return 0;
+
+	rc = roc_nix_rq_ena_dis(rq, false);
+	if (rc) {
+		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+		goto done;
+	}
+
+	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+	return rc;
+}
+
 /* CNXK platform independent eth dev ops */
 struct eth_dev_ops cnxk_eth_dev_ops = {
 	.dev_infos_get = cnxk_nix_info_get,
 	.link_update = cnxk_nix_link_update,
 	.tx_queue_release = cnxk_nix_tx_queue_release,
 	.rx_queue_release = cnxk_nix_rx_queue_release,
+	.tx_queue_start = cnxk_nix_tx_queue_start,
+	.rx_queue_start = cnxk_nix_rx_queue_start,
+	.rx_queue_stop = cnxk_nix_rx_queue_stop,
 	.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
 };
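
The common ops table registers tx_queue_start, rx_queue_start and rx_queue_stop directly, while tx_queue_stop is hooked per SoC (see the cn9k/cn10k overrides above) so the wrappers can also reset their fast-path Tx queue state. As a usage illustration (not part of this commit), combining the new queue stop/start with the already advertised runtime Rx queue setup lets an application re-create a single Rx queue on a new mempool without stopping the port. The queue id, descriptor count and "new_mp" below are assumptions of the sketch.

#include <errno.h>
#include <rte_ethdev.h>

/* Sketch only: quiesce one Rx queue at runtime, re-create it on a new
 * mempool, and start it again while the port keeps running. */
static int
requeue_rx_on_new_pool(uint16_t port_id, uint16_t qid,
		       struct rte_mempool *new_mp)
{
	struct rte_eth_dev_info info;
	int rc;

	rc = rte_eth_dev_info_get(port_id, &info);
	if (rc != 0)
		return rc;
	if (!(info.dev_capa & RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -ENOTSUP;

	/* Stop the queue: ends up in cnxk_nix_rx_queue_stop(), which
	 * disables the RQ and marks it RTE_ETH_QUEUE_STATE_STOPPED. */
	rc = rte_eth_dev_rx_queue_stop(port_id, qid);
	if (rc != 0)
		return rc;

	/* Re-setup the stopped queue on the new mempool (NULL rx_conf
	 * keeps the driver defaults). */
	rc = rte_eth_rx_queue_setup(port_id, qid, 1024,
				    rte_eth_dev_socket_id(port_id),
				    NULL, new_mp);
	if (rc != 0)
		return rc;

	/* Start it again via cnxk_nix_rx_queue_start(). */
	return rte_eth_dev_rx_queue_start(port_id, qid);
}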


@@ -214,6 +214,7 @@ int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_rx_q_sz,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
+int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
 uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);