mempool/octeontx2: add devargs to lock context in cache

Add device arguments to lock NPA aura and pool contexts in NDC cache.
The device args take a hexadecimal bitmask where each bit represents the
corresponding aura/pool id.
Example:
	-w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
This commit is contained in:
Pavan Nikhilesh 2020-05-11 15:37:38 +05:30 committed by Thomas Monjalon
parent 1da6702e3c
commit 31246a328f
12 changed files with 158 additions and 6 deletions

View File

@ -148,6 +148,16 @@ Runtime Config Options
-w 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0] -w 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
- ``Lock NPA contexts in NDC``
Lock NPA aura and pool contexts in NDC cache.
The device args take a hexadecimal bitmask where each bit represents the
corresponding aura/pool id.
For example::
-w 0002:0e:00.0,npa_lock_mask=0xf
Debugging Options Debugging Options
~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~

View File

@ -61,6 +61,16 @@ Runtime Config Options
provide ``max_pools`` parameter to the first PCIe device probed by the given provide ``max_pools`` parameter to the first PCIe device probed by the given
application. application.
- ``Lock NPA contexts in NDC``
Lock NPA aura and pool contexts in NDC cache.
The device args take a hexadecimal bitmask where each bit represents the
corresponding aura/pool id.
For example::
-w 0002:02:00.0,npa_lock_mask=0xf
Debugging Options Debugging Options
~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~

View File

@ -195,6 +195,7 @@ Runtime Config Options
Setting this flag to 1 to select the legacy mode. Setting this flag to 1 to select the legacy mode.
For example to select the legacy mode(RSS tag adder as XOR):: For example to select the legacy mode(RSS tag adder as XOR)::
-w 0002:02:00.0,tag_as_xor=1 -w 0002:02:00.0,tag_as_xor=1
- ``Max SPI for inbound inline IPsec`` (default ``1``) - ``Max SPI for inbound inline IPsec`` (default ``1``)
@ -203,6 +204,7 @@ Runtime Config Options
``ipsec_in_max_spi`` ``devargs`` parameter. ``ipsec_in_max_spi`` ``devargs`` parameter.
For example:: For example::
-w 0002:02:00.0,ipsec_in_max_spi=128 -w 0002:02:00.0,ipsec_in_max_spi=128
With the above configuration, application can enable inline IPsec processing With the above configuration, application can enable inline IPsec processing
@ -214,6 +216,16 @@ Runtime Config Options
parameters to all the PCIe devices if application requires to configure on parameters to all the PCIe devices if application requires to configure on
all the ethdev ports. all the ethdev ports.
- ``Lock NPA contexts in NDC``
Lock NPA aura and pool contexts in NDC cache.
The device args take a hexadecimal bitmask where each bit represents the
corresponding aura/pool id.
For example::
-w 0002:02:00.0,npa_lock_mask=0xf
.. _otx2_tmapi: .. _otx2_tmapi:
Traffic Management API Traffic Management API

View File

@ -34,6 +34,6 @@ SRCS-y += otx2_common.c
SRCS-y += otx2_sec_idev.c SRCS-y += otx2_sec_idev.c
LDLIBS += -lrte_eal LDLIBS += -lrte_eal
LDLIBS += -lrte_ethdev LDLIBS += -lrte_ethdev -lrte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk include $(RTE_SDK)/mk/rte.lib.mk

View File

@ -21,6 +21,6 @@ foreach flag: extra_flags
endif endif
endforeach endforeach
deps = ['eal', 'pci', 'ethdev'] deps = ['eal', 'pci', 'ethdev', 'kvargs']
includes += include_directories('../../common/octeontx2', includes += include_directories('../../common/octeontx2',
'../../mempool/octeontx2', '../../bus/pci') '../../mempool/octeontx2', '../../bus/pci')

View File

@ -169,6 +169,40 @@ int otx2_npa_lf_obj_ref(void)
return cnt ? 0 : -EINVAL; return cnt ? 0 : -EINVAL;
} }
/*
 * kvargs callback for OTX2_NPA_LOCK_MASK ("npa_lock_mask").
 *
 * Parses @value as a hexadecimal bitmask (each bit selects the
 * corresponding aura/pool id to lock in NDC) and stores it into
 * *(uint64_t *)extra_args.
 *
 * Returns 0 on success, -EINVAL on malformed or out-of-range input.
 * A nonzero return makes rte_kvargs_process() report failure instead
 * of silently treating garbage (e.g. "xyz") as mask 0.
 */
static int
parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
{
	char *end = NULL;
	uint64_t val;

	RTE_SET_USED(key);

	if (value == NULL || *value == '\0')
		return -EINVAL;

	errno = 0;
	val = strtoull(value, &end, 16);
	/* Reject trailing junk, empty digits and ERANGE overflow. */
	if (errno != 0 || end == value || *end != '\0')
		return -EINVAL;

	*(uint64_t *)extra_args = val;

	return 0;
}
/*
* @internal
* Parse common device arguments
*/
void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
{
struct otx2_idev_cfg *idev;
uint64_t npa_lock_mask = 0;
idev = otx2_intra_dev_get_cfg();
if (idev == NULL)
return;
rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
&parse_npa_lock_mask, &npa_lock_mask);
idev->npa_lock_mask = npa_lock_mask;
}
/** /**
* @internal * @internal
*/ */

View File

@ -8,6 +8,7 @@
#include <rte_atomic.h> #include <rte_atomic.h>
#include <rte_common.h> #include <rte_common.h>
#include <rte_cycles.h> #include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_memory.h> #include <rte_memory.h>
#include <rte_memzone.h> #include <rte_memzone.h>
#include <rte_io.h> #include <rte_io.h>
@ -49,6 +50,8 @@
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif #endif
#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
/* Intra device related functions */ /* Intra device related functions */
struct otx2_npa_lf; struct otx2_npa_lf;
struct otx2_idev_cfg { struct otx2_idev_cfg {
@ -60,6 +63,7 @@ struct otx2_idev_cfg {
rte_atomic16_t npa_refcnt; rte_atomic16_t npa_refcnt;
uint16_t npa_refcnt_u16; uint16_t npa_refcnt_u16;
}; };
uint64_t npa_lock_mask;
}; };
__rte_internal __rte_internal
@ -78,6 +82,8 @@ __rte_internal
int otx2_npa_lf_active(void *dev); int otx2_npa_lf_active(void *dev);
__rte_internal __rte_internal
int otx2_npa_lf_obj_ref(void); int otx2_npa_lf_obj_ref(void);
__rte_internal
void otx2_parse_common_devargs(struct rte_kvargs *kvlist);
/* Log */ /* Log */
extern int otx2_logtype_base; extern int otx2_logtype_base;

View File

@ -32,6 +32,7 @@ INTERNAL {
otx2_npa_lf_obj_ref; otx2_npa_lf_obj_ref;
otx2_npa_pf_func_get; otx2_npa_pf_func_get;
otx2_npa_set_defaults; otx2_npa_set_defaults;
otx2_parse_common_devargs;
otx2_register_irq; otx2_register_irq;
otx2_sec_idev_cfg_init; otx2_sec_idev_cfg_init;
otx2_sec_idev_tx_cpt_qp_add; otx2_sec_idev_tx_cpt_qp_add;

View File

@ -1659,7 +1659,7 @@ sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
&single_ws); &single_ws);
rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict, rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
dev); dev);
otx2_parse_common_devargs(kvlist);
dev->dual_ws = !single_ws; dev->dual_ws = !single_ws;
rte_kvargs_free(kvlist); rte_kvargs_free(kvlist);
} }
@ -1821,4 +1821,5 @@ RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>" RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
OTX2_SSO_SINGLE_WS "=1" OTX2_SSO_SINGLE_WS "=1"
OTX2_SSO_GGRP_QOS "=<string>" OTX2_SSO_GGRP_QOS "=<string>"
OTX2_SSO_SELFTEST "=1"); OTX2_SSO_SELFTEST "=1"
OTX2_NPA_LOCK_MASK "=<1-65535>");

View File

@ -191,6 +191,7 @@ otx2_parse_aura_size(struct rte_devargs *devargs)
goto exit; goto exit;
rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz); rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz);
otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist); rte_kvargs_free(kvlist);
exit: exit:
return aura_sz; return aura_sz;
@ -452,4 +453,5 @@ RTE_PMD_REGISTER_PCI(mempool_octeontx2, pci_npa);
RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map); RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci"); RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2, RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2,
OTX2_MAX_POOLS "=<128-1048576>"); OTX2_MAX_POOLS "=<128-1048576>"
OTX2_NPA_LOCK_MASK "=<1-65535>");

View File

@ -348,8 +348,13 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
struct npa_aq_enq_req *aura_init_req, *pool_init_req; struct npa_aq_enq_req *aura_init_req, *pool_init_req;
struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp; struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0]; struct otx2_mbox_dev *mdev = &mbox->dev[0];
struct otx2_idev_cfg *idev;
int rc, off; int rc, off;
idev = otx2_intra_dev_get_cfg();
if (idev == NULL)
return -ENOMEM;
aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox); aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
aura_init_req->aura_id = aura_id; aura_init_req->aura_id = aura_id;
@ -379,6 +384,44 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
return 0; return 0;
else else
return NPA_LF_ERR_AURA_POOL_INIT; return NPA_LF_ERR_AURA_POOL_INIT;
if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
return 0;
aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
aura_init_req->aura_id = aura_id;
aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
aura_init_req->op = NPA_AQ_INSTOP_LOCK;
pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
if (!pool_init_req) {
/* The shared memory buffer can be full.
* Flush it and retry
*/
otx2_mbox_msg_send(mbox, 0);
rc = otx2_mbox_wait_for_rsp(mbox, 0);
if (rc < 0) {
otx2_err("Failed to LOCK AURA context");
return -ENOMEM;
}
pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
if (!pool_init_req) {
otx2_err("Failed to LOCK POOL context");
return -ENOMEM;
}
}
pool_init_req->aura_id = aura_id;
pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
pool_init_req->op = NPA_AQ_INSTOP_LOCK;
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to lock POOL ctx to NDC");
return -ENOMEM;
}
return 0;
} }
static int static int
@ -390,8 +433,13 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
struct npa_aq_enq_rsp *aura_rsp, *pool_rsp; struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0]; struct otx2_mbox_dev *mdev = &mbox->dev[0];
struct ndc_sync_op *ndc_req; struct ndc_sync_op *ndc_req;
struct otx2_idev_cfg *idev;
int rc, off; int rc, off;
idev = otx2_intra_dev_get_cfg();
if (idev == NULL)
return -EINVAL;
/* Procedure for disabling an aura/pool */ /* Procedure for disabling an aura/pool */
rte_delay_us(10); rte_delay_us(10);
npa_lf_aura_op_alloc(aura_handle, 0); npa_lf_aura_op_alloc(aura_handle, 0);
@ -434,6 +482,32 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
otx2_err("Error on NDC-NPA LF sync, rc %d", rc); otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
return NPA_LF_ERR_AURA_POOL_FINI; return NPA_LF_ERR_AURA_POOL_FINI;
} }
if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
return 0;
aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
aura_req->aura_id = aura_id;
aura_req->ctype = NPA_AQ_CTYPE_AURA;
aura_req->op = NPA_AQ_INSTOP_UNLOCK;
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to unlock AURA ctx to NDC");
return -EINVAL;
}
pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
pool_req->aura_id = aura_id;
pool_req->ctype = NPA_AQ_CTYPE_POOL;
pool_req->op = NPA_AQ_INSTOP_UNLOCK;
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to unlock POOL ctx to NDC");
return -EINVAL;
}
return 0; return 0;
} }

View File

@ -163,6 +163,7 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
&parse_switch_header_type, &switch_header_type); &parse_switch_header_type, &switch_header_type);
rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR, rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
&parse_flag, &rss_tag_as_xor); &parse_flag, &rss_tag_as_xor);
otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist); rte_kvargs_free(kvlist);
null_devargs: null_devargs:
@ -188,4 +189,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
OTX2_FLOW_PREALLOC_SIZE "=<1-32>" OTX2_FLOW_PREALLOC_SIZE "=<1-32>"
OTX2_FLOW_MAX_PRIORITY "=<1-32>" OTX2_FLOW_MAX_PRIORITY "=<1-32>"
OTX2_SWITCH_HEADER_TYPE "=<higig2|dsa|chlen90b>" OTX2_SWITCH_HEADER_TYPE "=<higig2|dsa|chlen90b>"
OTX2_RSS_TAG_AS_XOR "=1"); OTX2_RSS_TAG_AS_XOR "=1"
OTX2_NPA_LOCK_MASK "=<1-65535>");