common/cnxk: delay inline device RQ enable to dev start

Similar to other RQs, delay enabling the inline device RQ until the
device is started, to avoid traffic reception while the device is stopped.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Author: Nithin Dabilpuram, 2022-09-12 18:44:01 +05:30
Committed-by: Jerin Jacob
Commit: da1ec39060 (parent: 93c6b6b271)
5 files changed, 52 insertions(+), 10 deletions(-)
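
In short, the patch threads an explicit enable flag through the inline
device RQ path: roc_nix_inl_dev_rq_get() gains a bool so queue setup can
create the RQ without enabling it, and the new roc_nix_inl_rq_ena_dis()
helper toggles it from dev start/stop. A condensed sketch of the
resulting driver flow, pieced together from the cnxk_ethdev.c hunks
below (error handling trimmed):

	/* Rx queue setup: enable the inline dev RQ only if the port runs. */
	rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);

	/* dev_start: port is up, enable the inline dev RQ. */
	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);

	/* dev_stop: disable the inline dev RQ before stopping Rx queues. */
	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		roc_nix_inl_rq_ena_dis(&dev->nix, false);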

diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
@@ -17,6 +17,4 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 
-uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
-
 #endif /* _ROC_IDEV_H_ */

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
@@ -245,6 +245,9 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
 	struct roc_cpt *roc_cpt;
 	struct roc_cpt_rxc_time_cfg cfg;
 
+	if (!idev)
+		return -EFAULT;
+
 	PLT_SET_USED(max_frags);
 	if (idev == NULL)
 		return -ENOTSUP;
@@ -587,7 +590,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
 }
 
 int
-roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
+roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 {
 	struct idev_cfg *idev = idev_get_cfg();
 	int port_id = rq->roc_nix->port_id;
@@ -688,9 +691,9 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 
 	/* Prepare and send RQ init mbox */
 	if (roc_model_is_cn9k())
-		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	else
-		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	if (rc) {
 		plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
 		return rc;
@@ -755,6 +758,31 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	return rc;
 }
 
+int
+roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_rq *inl_rq = roc_nix_inl_dev_rq(roc_nix);
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
+	int rc;
+
+	if (!idev)
+		return -EFAULT;
+
+	if (nix->inb_inl_dev) {
+		if (!inl_rq || !idev->nix_inl_dev)
+			return -EFAULT;
+
+		inl_dev = idev->nix_inl_dev;
+		rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
 void
 roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 {
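
The new helper is safe to call unconditionally: when the port does not
steer inbound traffic to the shared inline device (nix->inb_inl_dev is
false), it returns 0 without touching any RQ, and it fails with -EFAULT
only when the idev config or the inline device RQ is missing. A small
usage sketch (the wrapper below is illustrative, not part of this patch):

	/* Hypothetical wrapper, not part of this patch: toggle the inline
	 * device RQ around a port state change and log failures. */
	static int
	inl_dev_rq_set_state(struct roc_nix *roc_nix, bool up)
	{
		int rc;

		rc = roc_nix_inl_rq_ena_dis(roc_nix, up);
		if (rc)
			plt_err("Inline dev RQ %s failed, rc=%d",
				up ? "enable" : "disable", rc);
		return rc;
	}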

diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
@@ -165,7 +165,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
 uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
 					   bool inl_dev_sa, uint32_t spi);
 void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
-int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq);
+int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
 struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
@@ -175,6 +175,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 					   uint16_t max_frags);
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
+int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -189,6 +190,8 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
 					void *args);
 int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
+uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
 	ROC_NIX_INL_SA_OP_FLUSH,

diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
@@ -90,7 +90,6 @@ INTERNAL {
 	roc_hash_sha512_gen;
 	roc_idev_cpt_get;
 	roc_idev_cpt_set;
-	roc_nix_inl_outb_ring_base_get;
 	roc_idev_lmt_base_addr_get;
 	roc_idev_npa_maxpools_get;
 	roc_idev_npa_maxpools_set;
@@ -137,11 +136,13 @@ INTERNAL {
 	roc_nix_get_vwqe_interval;
 	roc_nix_inl_cb_register;
 	roc_nix_inl_cb_unregister;
+	roc_nix_inl_ctx_write;
 	roc_nix_inl_dev_dump;
 	roc_nix_inl_dev_fini;
 	roc_nix_inl_dev_init;
 	roc_nix_inl_dev_is_probed;
 	roc_nix_inl_dev_lock;
+	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_dev_rq;
 	roc_nix_inl_dev_rq_get;
 	roc_nix_inl_dev_rq_put;
@@ -163,11 +164,11 @@ INTERNAL {
 	roc_nix_inl_outb_sa_base_get;
 	roc_nix_inl_outb_sso_pffunc_get;
 	roc_nix_inl_outb_is_enabled;
+	roc_nix_inl_outb_ring_base_get;
 	roc_nix_inl_outb_soft_exp_poll_switch;
+	roc_nix_inl_rq_ena_dis;
 	roc_nix_inl_sa_sync;
 	roc_nix_inl_ts_pkind_set;
-	roc_nix_inl_ctx_write;
-	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_outb_cpt_lfs_dump;
 	roc_nix_cpt_ctx_cache_sync;
 	roc_nix_is_lbk;

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
@@ -660,7 +660,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
 
 		/* Setup rq reference for inline dev if present */
-		rc = roc_nix_inl_dev_rq_get(rq);
+		rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);
 		if (rc)
 			goto free_mem;
 	}
@@ -1477,6 +1477,10 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
 
+	/* Stop inline device RQ first */
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+		roc_nix_inl_rq_ena_dis(&dev->nix, false);
+
 	/* Stop rx queues and free up pkts pending */
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rc = dev_ops->rx_queue_stop(eth_dev, i);
@@ -1522,6 +1526,14 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 		return rc;
 	}
 
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
+		if (rc) {
+			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
+			return rc;
+		}
+	}
+
 	/* Start tx queues */
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		rc = cnxk_nix_tx_queue_start(eth_dev, i);
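
The ordering in these two hunks mirrors the commit message: on stop, the
inline device RQ is disabled before the per-port Rx queues are torn down;
on start, it is enabled alongside the other queues. From an application's
point of view, inline-inbound traffic now simply follows port state. A
hedged sketch using the standard ethdev API (port_id and device setup
assumed elsewhere):

	/* Application view (standard rte_ethdev calls): the inline device
	 * RQ now tracks port start/stop, so no packets are received while
	 * the port is stopped. */
	rte_eth_dev_start(port_id);	/* inline dev RQ gets enabled */
	/* ... inline IPsec traffic flows ... */
	rte_eth_dev_stop(port_id);	/* inline dev RQ disabled first */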