net/octeontx2: add devargs to lock Rx/Tx contexts

Add device arguments to lock Rx/Tx contexts.
An application can lock Rx or Tx contexts by passing the
'lock_rx_ctx' or 'lock_tx_ctx' devarg, respectively, per port.

Example:
	-w 0002:02:00.0,lock_rx_ctx=1 -w 0002:03:00.0,lock_tx_ctx=1

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Reviewed-by: Andrzej Ostruszka <aostruszka@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Pavan Nikhilesh authored on 2020-06-29 03:48:32 +05:30; committed by Ferruh Yigit
commit 3b0bc725f0 (parent 676e0ce5be)
5 changed files with 244 additions and 9 deletions
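
For illustration only (not part of the commit): the same devargs can be handed to a DPDK application programmatically by forwarding them to rte_eal_init(). A minimal sketch, assuming the example BDFs above and the pre-20.11 '-w' whitelist option; the harness itself is hypothetical:

	#include <rte_eal.h>

	int
	main(void)
	{
		/* Hypothetical harness: equivalent of the command line above. */
		char *argv[] = {
			"app",
			"-w", "0002:02:00.0,lock_rx_ctx=1",
			"-w", "0002:03:00.0,lock_tx_ctx=1",
		};
		int argc = sizeof(argv) / sizeof(argv[0]);

		if (rte_eal_init(argc, argv) < 0)
			return -1;

		/* ... usual ethdev configuration; the PMD parses the devargs
		 * at probe time and sets dev->lock_rx_ctx / dev->lock_tx_ctx.
		 */
		return 0;
	}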

doc/guides/nics/octeontx2.rst

@@ -210,6 +210,22 @@ Runtime Config Options
With the above configuration, application can enable inline IPsec processing
on 128 SAs (SPI 0-127).

- ``Lock Rx contexts in NDC cache``

   Lock Rx contexts in NDC cache by using ``lock_rx_ctx`` parameter.

   For example::

      -w 0002:02:00.0,lock_rx_ctx=1

- ``Lock Tx contexts in NDC cache``

   Lock Tx contexts in NDC cache by using ``lock_tx_ctx`` parameter.

   For example::

      -w 0002:02:00.0,lock_tx_ctx=1

.. note::

   Above devarg parameters are configurable per device; the user needs to pass
   the parameters to all the PCIe devices if the application requires them on
   all the ethdev ports.

View File

@@ -298,8 +298,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
NIX_CQ_ALIGN, dev->node);
if (rz == NULL) {
otx2_err("Failed to allocate mem for cq hw ring");
-		rc = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
}
memset(rz->addr, 0, rz->len);
rxq->desc = (uintptr_t)rz->addr;
@@ -348,7 +347,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
rc = otx2_mbox_process(mbox);
if (rc) {
otx2_err("Failed to init cq context");
-		goto fail;
+		return rc;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -387,12 +386,44 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
rc = otx2_mbox_process(mbox);
if (rc) {
otx2_err("Failed to init rq context");
-		goto fail;
+		return rc;
}
if (dev->lock_rx_ctx) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_LOCK;
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
* Flush it and retry
*/
otx2_mbox_msg_send(mbox, 0);
rc = otx2_mbox_wait_for_rsp(mbox, 0);
if (rc < 0) {
otx2_err("Failed to LOCK cq context");
return rc;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
otx2_err("Failed to LOCK rq context");
return -ENOMEM;
}
}
aq->qidx = qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_LOCK;
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to LOCK rq context");
return rc;
}
}
+	return 0;
-fail:
-	return rc;
}
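
The allocate, flush, retry sequence above is repeated verbatim for every LOCK/UNLOCK request in this patch. As a sketch only — the commit itself open-codes the pattern each time, and nix_aq_alloc_retry is an invented name — it could be factored into a single helper:

/* Hypothetical helper, not part of this commit: allocate a NIX AQ
 * request, flushing the mailbox once if its shared memory is full.
 */
static struct nix_aq_enq_req *
nix_aq_alloc_retry(struct otx2_mbox *mbox)
{
	struct nix_aq_enq_req *aq;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	if (aq)
		return aq;

	/* Shared memory may be full of queued requests: send them,
	 * wait for the responses, then retry the allocation once.
	 */
	otx2_mbox_msg_send(mbox, 0);
	if (otx2_mbox_wait_for_rsp(mbox, 0) < 0)
		return NULL;

	return otx2_mbox_alloc_msg_nix_aq_enq(mbox);
}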
static int
@@ -439,6 +470,40 @@ nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
return rc;
}
if (dev->lock_rx_ctx) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = rxq->rq;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_UNLOCK;
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
* Flush it and retry
*/
otx2_mbox_msg_send(mbox, 0);
rc = otx2_mbox_wait_for_rsp(mbox, 0);
if (rc < 0) {
otx2_err("Failed to UNLOCK cq context");
return rc;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
otx2_err("Failed to UNLOCK rq context");
return -ENOMEM;
}
}
aq->qidx = rxq->rq;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_UNLOCK;
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to UNLOCK rq context");
return rc;
}
}
return 0;
}
@@ -724,6 +789,94 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
return flags;
}
static int
nix_sqb_lock(struct rte_mempool *mp)
{
struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
struct npa_aq_enq_req *req;
int rc;
req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_LOCK;
req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
if (!req) {
/* The shared memory buffer can be full.
* Flush it and retry
*/
otx2_mbox_msg_send(npa_lf->mbox, 0);
rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
if (rc < 0) {
otx2_err("Failed to LOCK AURA context");
return rc;
}
req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
if (!req) {
otx2_err("Failed to LOCK POOL context");
return -ENOMEM;
}
}
req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
req->ctype = NPA_AQ_CTYPE_POOL;
req->op = NPA_AQ_INSTOP_LOCK;
rc = otx2_mbox_process(npa_lf->mbox);
if (rc < 0) {
otx2_err("Unable to lock POOL in NDC");
return rc;
}
return 0;
}
static int
nix_sqb_unlock(struct rte_mempool *mp)
{
struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
struct npa_aq_enq_req *req;
int rc;
req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_UNLOCK;
req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
if (!req) {
/* The shared memory buffer can be full.
* Flush it and retry
*/
otx2_mbox_msg_send(npa_lf->mbox, 0);
rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
if (rc < 0) {
otx2_err("Failed to UNLOCK AURA context");
return rc;
}
req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
if (!req) {
otx2_err("Failed to UNLOCK POOL context");
return -ENOMEM;
}
}
req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
req->ctype = NPA_AQ_CTYPE_POOL;
req->op = NPA_AQ_INSTOP_UNLOCK;
rc = otx2_mbox_process(npa_lf->mbox);
if (rc < 0) {
otx2_err("Unable to UNLOCK AURA in NDC");
return rc;
}
return 0;
}
static int
nix_sq_init(struct otx2_eth_txq *txq)
{
@@ -766,7 +919,20 @@ nix_sq_init(struct otx2_eth_txq *txq)
/* Many to one reduction */
sq->sq.qint_idx = txq->sq % dev->qints;
-	return otx2_mbox_process(mbox);
+	rc = otx2_mbox_process(mbox);
if (rc < 0)
return rc;
if (dev->lock_tx_ctx) {
sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
sq->qidx = txq->sq;
sq->ctype = NIX_AQ_CTYPE_SQ;
sq->op = NIX_AQ_INSTOP_LOCK;
rc = otx2_mbox_process(mbox);
}
return rc;
}
static int
@@ -809,6 +975,20 @@ nix_sq_uninit(struct otx2_eth_txq *txq)
if (rc)
return rc;
if (dev->lock_tx_ctx) {
/* Unlock sq */
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = txq->sq;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_UNLOCK;
rc = otx2_mbox_process(mbox);
if (rc < 0)
return rc;
nix_sqb_unlock(txq->sqb_pool);
}
/* Read SQ and free sqb's */
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = txq->sq;
@@ -930,6 +1110,8 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
}
nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
if (dev->lock_tx_ctx)
nix_sqb_lock(txq->sqb_pool);
return 0;
fail:

drivers/net/octeontx2/otx2_ethdev.h

@@ -274,6 +274,8 @@ struct otx2_eth_dev {
bool dmac_filter_enable;
uint8_t lf_tx_stats;
uint8_t lf_rx_stats;
uint8_t lock_rx_ctx;
uint8_t lock_tx_ctx;
uint16_t flags;
uint16_t cints;
uint16_t qints;

drivers/net/octeontx2/otx2_ethdev_devargs.c

@@ -126,6 +126,8 @@ parse_switch_header_type(const char *key, const char *value, void *extra_args)
#define OTX2_FLOW_MAX_PRIORITY "flow_max_priority"
#define OTX2_SWITCH_HEADER_TYPE "switch_header"
#define OTX2_RSS_TAG_AS_XOR "tag_as_xor"
#define OTX2_LOCK_RX_CTX "lock_rx_ctx"
#define OTX2_LOCK_TX_CTX "lock_tx_ctx"
int
otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
@@ -136,9 +138,11 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
uint16_t switch_header_type = 0;
uint16_t flow_max_priority = 3;
uint16_t ipsec_in_max_spi = 1;
-	uint16_t scalar_enable = 0;
	uint16_t rss_tag_as_xor = 0;
+	uint16_t scalar_enable = 0;
struct rte_kvargs *kvlist;
uint16_t lock_rx_ctx = 0;
uint16_t lock_tx_ctx = 0;
if (devargs == NULL)
goto null_devargs;
@@ -163,6 +167,10 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
&parse_switch_header_type, &switch_header_type);
rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
&parse_flag, &rss_tag_as_xor);
rte_kvargs_process(kvlist, OTX2_LOCK_RX_CTX,
&parse_flag, &lock_rx_ctx);
rte_kvargs_process(kvlist, OTX2_LOCK_TX_CTX,
&parse_flag, &lock_tx_ctx);
otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist);
@@ -171,6 +179,8 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
dev->scalar_ena = scalar_enable;
dev->rss_tag_as_xor = rss_tag_as_xor;
dev->max_sqb_count = sqb_count;
dev->lock_rx_ctx = lock_rx_ctx;
dev->lock_tx_ctx = lock_tx_ctx;
dev->rss_info.rss_size = rss_size;
dev->npc_flow.flow_prealloc_size = flow_prealloc_size;
dev->npc_flow.flow_max_priority = flow_max_priority;
@@ -190,4 +200,6 @@ RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
OTX2_FLOW_MAX_PRIORITY "=<1-32>"
OTX2_SWITCH_HEADER_TYPE "=<higig2|dsa|chlen90b>"
OTX2_RSS_TAG_AS_XOR "=1"
-	OTX2_NPA_LOCK_MASK "=<1-65535>");
+	OTX2_NPA_LOCK_MASK "=<1-65535>"
+	OTX2_LOCK_RX_CTX "=1"
+	OTX2_LOCK_TX_CTX "=1");
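
For reference, the parse_flag callback passed to rte_kvargs_process() above is defined earlier in otx2_ethdev_devargs.c and is not part of this hunk. A sketch of its assumed shape, matching how the flags are stored as uint16_t above:

/* Assumed shape of the kvargs callback referenced above; the real
 * definition lives outside this hunk.
 */
static int
parse_flag(const char *key, const char *value, void *extra_args)
{
	RTE_SET_USED(key);

	*(uint16_t *)extra_args = atoi(value);

	return 0;
}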

drivers/net/octeontx2/otx2_rss.c

@@ -33,6 +33,29 @@ otx2_nix_rss_tbl_init(struct otx2_eth_dev *dev,
req->qidx = (group * rss->rss_size) + idx;
req->ctype = NIX_AQ_CTYPE_RSS;
req->op = NIX_AQ_INSTOP_INIT;
if (!dev->lock_rx_ctx)
continue;
req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!req) {
/* The shared memory buffer can be full.
* Flush it and retry
*/
otx2_mbox_msg_send(mbox, 0);
rc = otx2_mbox_wait_for_rsp(mbox, 0);
if (rc < 0)
return rc;
req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!req)
return -ENOMEM;
}
req->rss.rq = ind_tbl[idx];
/* Fill AQ info */
req->qidx = (group * rss->rss_size) + idx;
req->ctype = NIX_AQ_CTYPE_RSS;
req->op = NIX_AQ_INSTOP_LOCK;
}
otx2_mbox_msg_send(mbox, 0);