common/cnxk: support zero AURA for inline inbound meta

Add support to create a zero aura for inline inbound meta packets when
the platform supports it.

Aura zero will hold as many buffers as all the available packet pools
combined, each with enough data room to accommodate 384B in the best
case, to store meta packets coming from Inline IPsec (see the usage
sketch below).

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Authored by Nithin Dabilpuram on 2022-09-12 18:44:05 +05:30; committed by Jerin Jacob
parent aaea4c74b7
commit 0f3f3ad850
11 changed files with 270 additions and 0 deletions
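
For context, here is a minimal PMD-side sketch (not part of this commit) of how the new hook is meant to be consumed. The callback signature matches the roc_nix_inl_meta_pool_cb_t typedef added by this patch; cnxk_pool_create() and cnxk_pool_destroy() are hypothetical placeholders for the driver's real pool helpers, and the roc_api.h include is assumed.

#include <stdbool.h>
#include <stdint.h>

#include "roc_api.h" /* assumed RoC API umbrella header */

/* Hypothetical driver helpers, declared only to keep the sketch
 * self-contained.
 */
int cnxk_pool_create(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs);
int cnxk_pool_destroy(uint64_t aura_handle);

static int
pmd_meta_pool_cb(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
		 bool destroy)
{
	if (destroy)
		return cnxk_pool_destroy(*aura_handle);

	/* Create a pool of nb_bufs buffers of blk_sz bytes each and
	 * return its aura handle through aura_handle.
	 */
	return cnxk_pool_create(aura_handle, blk_sz, nb_bufs);
}

static void
pmd_inline_inb_setup(void)
{
	/* Register before inline inbound init; the common code calls the
	 * callback when the first inline-enabled RQ is validated, and the
	 * created handle is then visible via
	 * roc_idev_nix_inl_meta_aura_get().
	 */
	roc_nix_inl_meta_pool_cb_register(pmd_meta_pool_cb);
}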


@@ -241,3 +241,13 @@ idev_sso_set(struct roc_sso *sso)
if (idev != NULL)
__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
}
uint64_t
roc_idev_nix_inl_meta_aura_get(void)
{
struct idev_cfg *idev = idev_get_cfg();
if (idev != NULL)
return idev->inl_cfg.meta_aura;
return 0;
}


@@ -16,5 +16,6 @@ struct roc_cpt *__roc_api roc_idev_cpt_get(void);
void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
#endif /* _ROC_IDEV_H_ */


@@ -10,6 +10,14 @@ struct npa_lf;
struct roc_bphy;
struct roc_cpt;
struct nix_inl_dev;
struct idev_nix_inl_cfg {
uint64_t meta_aura;
uint32_t nb_bufs;
uint32_t buf_sz;
uint32_t refs;
};
struct idev_cfg {
uint16_t sso_pf_func;
uint16_t npa_pf_func;
@@ -23,6 +31,7 @@ struct idev_cfg {
struct roc_cpt *cpt;
struct roc_sso *sso;
struct nix_inl_dev *nix_inl_dev;
struct idev_nix_inl_cfg inl_cfg;
plt_spinlock_t nix_inl_dev_lock;
};


@@ -321,6 +321,7 @@ struct roc_nix_rq {
bool spb_drop_ena;
/* End of Input parameters */
struct roc_nix *roc_nix;
uint64_t meta_aura_handle;
uint16_t inl_dev_refs;
};


@@ -6,6 +6,7 @@
#include "roc_priv.h"
uint32_t soft_exp_consumer_cnt;
roc_nix_inl_meta_pool_cb_t meta_pool_cb;
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
@@ -18,6 +19,155 @@ PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
static int
nix_inl_meta_aura_destroy(void)
{
struct idev_cfg *idev = idev_get_cfg();
struct idev_nix_inl_cfg *inl_cfg;
int rc;
if (!idev)
return -EINVAL;
inl_cfg = &idev->inl_cfg;
/* Destroy existing Meta aura */
if (inl_cfg->meta_aura) {
uint64_t avail, limit;
/* Check if all buffers are back to pool */
avail = roc_npa_aura_op_available(inl_cfg->meta_aura);
limit = roc_npa_aura_op_limit_get(inl_cfg->meta_aura);
if (avail != limit)
plt_warn("Not all buffers are back to meta pool,"
" %" PRIu64 " != %" PRIu64, avail, limit);
rc = meta_pool_cb(&inl_cfg->meta_aura, 0, 0, true);
if (rc) {
plt_err("Failed to destroy meta aura, rc=%d", rc);
return rc;
}
inl_cfg->meta_aura = 0;
inl_cfg->buf_sz = 0;
inl_cfg->nb_bufs = 0;
inl_cfg->refs = 0;
}
return 0;
}
static int
nix_inl_meta_aura_create(struct idev_cfg *idev, uint16_t first_skip)
{
uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
struct idev_nix_inl_cfg *inl_cfg;
struct nix_inl_dev *nix_inl_dev;
uint32_t nb_bufs, buf_sz;
int rc;
inl_cfg = &idev->inl_cfg;
nix_inl_dev = idev->nix_inl_dev;
/* Override meta buf count from devargs if present */
if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
nb_bufs = nix_inl_dev->nb_meta_bufs;
else
nb_bufs = roc_npa_buf_type_limit_get(mask);
/* Override meta buf size from devargs if present */
if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
buf_sz = nix_inl_dev->meta_buf_sz;
else
buf_sz = first_skip + NIX_INL_META_SIZE;
/* Allocate meta aura */
rc = meta_pool_cb(&inl_cfg->meta_aura, buf_sz, nb_bufs, false);
if (rc) {
plt_err("Failed to allocate meta aura, rc=%d", rc);
return rc;
}
inl_cfg->buf_sz = buf_sz;
inl_cfg->nb_bufs = nb_bufs;
return 0;
}
int
roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
{
struct idev_cfg *idev = idev_get_cfg();
struct idev_nix_inl_cfg *inl_cfg;
uint32_t actual, expected;
uint64_t mask, type_mask;
int rc;
if (!idev || !meta_pool_cb)
return -EFAULT;
inl_cfg = &idev->inl_cfg;
/* Create meta aura if not present */
if (!inl_cfg->meta_aura) {
rc = nix_inl_meta_aura_create(idev, rq->first_skip);
if (rc)
return rc;
}
/* Validate if we have enough meta buffers */
mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
expected = roc_npa_buf_type_limit_get(mask);
actual = inl_cfg->nb_bufs;
if (actual < expected) {
plt_err("Insufficient buffers in meta aura %u < %u (expected)",
actual, expected);
return -EIO;
}
/* Validate if we have enough space for meta buffer */
if (rq->first_skip + NIX_INL_META_SIZE > inl_cfg->buf_sz) {
plt_err("Meta buffer size %u not sufficient to meet RQ first skip %u",
inl_cfg->buf_sz, rq->first_skip);
return -EIO;
}
/* Validate if we have enough VWQE buffers */
if (rq->vwqe_ena) {
actual = roc_npa_aura_op_limit_get(rq->vwqe_aura_handle);
type_mask = roc_npa_buf_type_mask(rq->vwqe_aura_handle);
if (type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE_IPSEC) &&
type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE)) {
/* A VWQE aura shared b/w Inline-enabled and non-Inline-enabled
* ports needs enough buffers to store all the
* packet buffers, one per vwqe.
*/
mask = (BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC) |
BIT_ULL(ROC_NPA_BUF_TYPE_PACKET));
expected = roc_npa_buf_type_limit_get(mask);
if (actual < expected) {
plt_err("VWQE aura shared b/w Inline inbound and non-Inline inbound "
"ports needs vwqe bufs(%u) minimum of all pkt bufs (%u)",
actual, expected);
return -EIO;
}
} else {
/* A VWQE aura not shared b/w Inline and non-Inline ports has a
* relaxed requirement: it only needs to match all the meta buffers.
*/
expected = inl_cfg->nb_bufs;
if (actual < expected) {
plt_err("VWQE aura not shared b/w Inline inbound and non-Inline "
"ports needs vwqe bufs(%u) minimum of all meta bufs (%u)",
actual, expected);
return -EIO;
}
}
}
return 0;
}
static int
nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
{
@@ -310,6 +460,10 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
if (rc)
return rc;
if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
nix->need_meta_aura = true;
idev->inl_cfg.refs++;
}
nix->inl_inb_ena = true;
return 0;
}
@@ -317,12 +471,22 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
int
roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
{
struct idev_cfg *idev = idev_get_cfg();
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
if (!nix->inl_inb_ena)
return 0;
if (!idev)
return -EFAULT;
nix->inl_inb_ena = false;
if (nix->need_meta_aura) {
nix->need_meta_aura = false;
idev->inl_cfg.refs--;
if (!idev->inl_cfg.refs)
nix_inl_meta_aura_destroy();
}
/* Flush Inbound CTX cache entries */
roc_nix_cpt_ctx_cache_sync(roc_nix);
@@ -592,6 +756,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
int
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
{
struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
struct idev_cfg *idev = idev_get_cfg();
int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
@@ -603,6 +768,10 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
if (idev == NULL)
return 0;
/* Update meta aura handle in RQ */
if (nix->need_meta_aura)
rq->meta_aura_handle = roc_npa_zero_aura_handle();
inl_dev = idev->nix_inl_dev;
/* Nothing to do if no inline device */
if (!inl_dev)
@@ -705,6 +874,13 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
return rc;
}
/* Check meta aura */
if (enable && nix->need_meta_aura) {
rc = roc_nix_inl_meta_aura_check(rq);
if (rc)
return rc;
}
inl_rq->inl_dev_refs++;
rq->inl_dev_refs = 1;
return 0;
@@ -724,6 +900,7 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
if (idev == NULL)
return 0;
rq->meta_aura_handle = 0;
if (!rq->inl_dev_refs)
return 0;
@@ -779,6 +956,9 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
if (rc)
return rc;
if (enable && nix->need_meta_aura)
return roc_nix_inl_meta_aura_check(inl_rq);
}
return 0;
}
@@ -792,6 +972,31 @@ roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
nix->inb_inl_dev = use_inl_dev;
}
void
roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct idev_cfg *idev = idev_get_cfg();
if (!idev)
return;
/* Need to set here for cases when inbound SA table is
* managed outside RoC.
*/
nix->inl_inb_ena = ena;
if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
if (ena) {
nix->need_meta_aura = true;
idev->inl_cfg.refs++;
} else if (nix->need_meta_aura) {
nix->need_meta_aura = false;
idev->inl_cfg.refs--;
if (!idev->inl_cfg.refs)
nix_inl_meta_aura_destroy();
}
}
}
int
roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
{
@@ -1128,3 +1333,9 @@ roc_nix_inl_dev_unlock(void)
if (idev != NULL)
plt_spinlock_unlock(&idev->nix_inl_dev_lock);
}
void
roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
{
meta_pool_cb = cb;
}
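
As a worked example of the sizing logic in nix_inl_meta_aura_create() above: with an RQ first_skip of, say, 128B, each meta buffer comes out at 128 + 384 (NIX_INL_META_SIZE) = 512B, and the buffer count defaults to the combined limit of every aura tagged ROC_NPA_BUF_TYPE_PACKET_IPSEC, unless the inline device's nb_meta_bufs/meta_buf_sz devargs override either value.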


@@ -121,6 +121,9 @@ roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(void *sa)
typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
uint32_t soft_exp_event);
typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
bool destroy);
struct roc_nix_inl_dev {
/* Input parameters */
struct plt_pci_device *pci_dev;
@@ -135,6 +138,8 @@ struct roc_nix_inl_dev {
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;
bool set_soft_exp_poll;
uint32_t nb_meta_bufs;
uint32_t meta_buf_sz;
/* End of input parameters */
#define ROC_NIX_INL_MEM_SZ (1280)
@@ -165,6 +170,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
bool inl_dev_sa, uint32_t spi);
void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
void __roc_api roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena);
int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
@@ -176,6 +182,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
bool inb_inl_dev);
int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq);
/* NIX Inline Outbound API */
int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -191,6 +198,7 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
bool poll);
uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
/* NIX Inline/Outbound API */
enum roc_nix_inl_sa_sync_op {


@@ -841,6 +841,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
if (roc_inl_dev->spb_drop_pc)
inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;


@@ -6,6 +6,8 @@
#include <pthread.h>
#include <sys/types.h>
#define NIX_INL_META_SIZE 384u
struct nix_inl_dev;
struct nix_inl_qint {
struct nix_inl_dev *inl_dev;
@@ -86,6 +88,8 @@ struct nix_inl_dev {
bool attach_cptlf;
uint16_t wqe_skip;
bool ts_ena;
uint32_t nb_meta_bufs;
uint32_t meta_buf_sz;
};
int nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev);


@@ -202,6 +202,7 @@ struct nix {
uint16_t nb_cpt_lf;
uint16_t outb_se_ring_cnt;
uint16_t outb_se_ring_base;
bool need_meta_aura;
/* Mode provided by driver */
bool inb_inl_dev;


@@ -89,7 +89,12 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
rc = nix_rq_ena_dis(&nix->dev, rq, enable);
nix_rq_vwqe_flush(rq, nix->vwqe_interval);
if (rc)
return rc;
/* Check for meta aura if RQ is enabled */
if (enable && nix->need_meta_aura)
rc = roc_nix_inl_meta_aura_check(rq);
return rc;
}
@@ -556,6 +561,13 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
/* Update aura buf type to indicate its use */
nix_rq_aura_buf_type_update(rq, true);
/* Check for meta aura if RQ is enabled */
if (ena && nix->need_meta_aura) {
rc = roc_nix_inl_meta_aura_check(rq);
if (rc)
return rc;
}
return nix_tel_node_add_rq(rq);
}
@@ -594,6 +606,13 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
/* Update aura attribute to indicate its use */
nix_rq_aura_buf_type_update(rq, true);
/* Check for meta aura if RQ is enabled */
if (ena && nix->need_meta_aura) {
rc = roc_nix_inl_meta_aura_check(rq);
if (rc)
return rc;
}
return nix_tel_node_add_rq(rq);
}


@@ -95,6 +95,7 @@ INTERNAL {
roc_idev_npa_maxpools_set;
roc_idev_npa_nix_get;
roc_idev_num_lmtlines_get;
roc_idev_nix_inl_meta_aura_get;
roc_model;
roc_se_auth_key_set;
roc_se_ciph_key_set;
@@ -156,7 +157,10 @@ INTERNAL {
roc_nix_inl_inb_sa_sz;
roc_nix_inl_inb_tag_update;
roc_nix_inl_inb_fini;
roc_nix_inl_inb_set;
roc_nix_inb_is_with_inl_dev;
roc_nix_inl_meta_aura_check;
roc_nix_inl_meta_pool_cb_register;
roc_nix_inb_mode_set;
roc_nix_inl_outb_fini;
roc_nix_inl_outb_init;