common/cnxk: support per-port RQ in inline device

Add support for a per-port RQ in the inline device, taking the
Aura/Pool attributes from that port's first RQ. When the inline
device is used with channel masking, it falls back to a single
RQ for all ethdev ports.

Also remove the clamping up of the CQ size for LBK ethdevs when
inline inbound is enabled, as backpressure is now supported even
on LBK ethdevs.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Nithin Dabilpuram authored on 2022-05-08 13:18:39 +05:30; committed by Jerin Jacob
parent 14124e48a3
commit 3c100e0e6b
10 changed files with 201 additions and 129 deletions
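Note: the whole change hinges on one selection rule, repeated at each lookup site in the diff below: when the inline device owns multiple RQs, the ethdev port id indexes the RQ array directly; with channel masking there is only one RQ and every port maps to index 0. A minimal sketch of that rule (the helper name is illustrative, not part of the API):

#include <stdint.h>

/* Sketch of the per-port inline RQ selection used throughout this
 * commit: one RQ per ethdev port normally, a single shared RQ when
 * the inline device runs with channel masking (nb_rqs == 1).
 */
static inline uint16_t
inl_rq_id_get(uint16_t nb_rqs, uint16_t port_id)
{
	return nb_rqs > 1 ? port_id : 0;
}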


@@ -303,7 +303,7 @@ struct roc_nix_rq {
bool spb_drop_ena;
/* End of Input parameters */
struct roc_nix *roc_nix;
bool inl_dev_ref;
uint16_t inl_dev_refs;
};
struct roc_nix_cq {


@@ -826,7 +826,7 @@ roc_nix_rq_dump(struct roc_nix_rq *rq)
nix_dump(" vwqe_wait_tmo = %ld", rq->vwqe_wait_tmo);
nix_dump(" vwqe_aura_handle = %ld", rq->vwqe_aura_handle);
nix_dump(" roc_nix = %p", rq->roc_nix);
nix_dump(" inl_dev_ref = %d", rq->inl_dev_ref);
nix_dump(" inl_dev_refs = %d", rq->inl_dev_refs);
}
void
@@ -1243,6 +1243,7 @@ roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
struct nix_inl_dev *inl_dev =
(struct nix_inl_dev *)&roc_inl_dev->reserved;
struct dev *dev = &inl_dev->dev;
int i;
nix_dump("nix_inl_dev@%p", inl_dev);
nix_dump(" pf = %d", dev_get_pf(dev->pf_func));
@@ -1259,7 +1260,6 @@ roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
nix_dump(" \tssow_msixoff = %d", inl_dev->ssow_msixoff);
nix_dump(" \tnix_cints = %d", inl_dev->cints);
nix_dump(" \tnix_qints = %d", inl_dev->qints);
nix_dump(" \trq_refs = %d", inl_dev->rq_refs);
nix_dump(" \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
nix_dump(" \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
nix_dump(" \txaq_buf_size = %u", inl_dev->xaq_buf_size);
@@ -1269,5 +1269,6 @@ roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
nix_dump(" \txaq_mem = 0x%p", inl_dev->xaq.mem);
nix_dump(" \tinl_dev_rq:");
roc_nix_rq_dump(&inl_dev->rq);
for (i = 0; i < inl_dev->nb_rqs; i++)
roc_nix_rq_dump(&inl_dev->rqs[i]);
}


@@ -588,8 +588,10 @@ int
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
{
struct idev_cfg *idev = idev_get_cfg();
int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
uint16_t inl_rq_id;
struct dev *dev;
int rc;
@@ -601,19 +603,24 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
if (!inl_dev)
return 0;
/* Check if this RQ is already holding reference */
if (rq->inl_dev_refs)
return 0;
inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
dev = &inl_dev->dev;
inl_rq = &inl_dev->rqs[inl_rq_id];
/* Just take reference if already inited */
if (inl_dev->rq_refs) {
inl_dev->rq_refs++;
rq->inl_dev_ref = true;
if (inl_rq->inl_dev_refs) {
inl_rq->inl_dev_refs++;
rq->inl_dev_refs = 1;
return 0;
}
dev = &inl_dev->dev;
inl_rq = &inl_dev->rq;
memset(inl_rq, 0, sizeof(struct roc_nix_rq));
/* Take RQ pool attributes from the first ethdev RQ */
inl_rq->qid = 0;
inl_rq->qid = inl_rq_id;
inl_rq->aura_handle = rq->aura_handle;
inl_rq->first_skip = rq->first_skip;
inl_rq->later_skip = rq->later_skip;
@@ -691,8 +698,8 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
return rc;
}
inl_dev->rq_refs++;
rq->inl_dev_ref = true;
inl_rq->inl_dev_refs++;
rq->inl_dev_refs = 1;
return 0;
}
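With references now counted per inline RQ (inl_rq->inl_dev_refs) rather than per device, each ethdev RQ pairs one get with one put. A hedged caller sketch, assuming the cnxk ROC headers; the wrapper name is hypothetical:

/* Hypothetical caller: take a reference on the port's inline RQ at
 * ethdev RQ setup, drop it at teardown. The first get for a port
 * initializes the inline RQ from this RQ's Aura/Pool attributes;
 * later gets on the same port only bump the reference count.
 */
static int
ethdev_rq_inline_setup(struct roc_nix_rq *rq)
{
	int rc;

	rc = roc_nix_inl_dev_rq_get(rq); /* no-op without an inline device */
	if (rc)
		return rc;
	/* ... datapath runs ... */
	return roc_nix_inl_dev_rq_put(rq); /* last put disables the RQ */
}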
@@ -700,15 +707,17 @@ int
roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
{
struct idev_cfg *idev = idev_get_cfg();
int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
uint16_t inl_rq_id;
struct dev *dev;
int rc;
if (idev == NULL)
return 0;
if (!rq->inl_dev_ref)
if (!rq->inl_dev_refs)
return 0;
inl_dev = idev->nix_inl_dev;
@@ -718,13 +727,15 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
return -EFAULT;
}
rq->inl_dev_ref = false;
inl_dev->rq_refs--;
if (inl_dev->rq_refs)
dev = &inl_dev->dev;
inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
inl_rq = &inl_dev->rqs[inl_rq_id];
rq->inl_dev_refs = 0;
inl_rq->inl_dev_refs--;
if (inl_rq->inl_dev_refs)
return 0;
dev = &inl_dev->dev;
inl_rq = &inl_dev->rq;
/* There are no more references, disable RQ */
rc = nix_rq_ena_dis(dev, inl_rq, false);
if (rc)
@@ -740,25 +751,6 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
return rc;
}
uint64_t
roc_nix_inl_dev_rq_limit_get(void)
{
struct idev_cfg *idev = idev_get_cfg();
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
if (!idev || !idev->nix_inl_dev)
return 0;
inl_dev = idev->nix_inl_dev;
if (!inl_dev->rq_refs)
return 0;
inl_rq = &inl_dev->rq;
return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
}
void
roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
{
@@ -807,15 +799,22 @@ roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
}
struct roc_nix_rq *
roc_nix_inl_dev_rq(void)
roc_nix_inl_dev_rq(struct roc_nix *roc_nix)
{
struct idev_cfg *idev = idev_get_cfg();
int port_id = roc_nix->port_id;
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
uint16_t inl_rq_id;
if (idev != NULL) {
inl_dev = idev->nix_inl_dev;
if (inl_dev != NULL && inl_dev->rq_refs)
return &inl_dev->rq;
if (inl_dev != NULL) {
inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
inl_rq = &inl_dev->rqs[inl_rq_id];
if (inl_rq->inl_dev_refs)
return inl_rq;
}
}
return NULL;
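Since the lookup is now per port, roc_nix_inl_dev_rq() takes the roc_nix handle and returns NULL when that port holds no inline RQ reference. A small caller-side sketch (the function name is illustrative):

/* Sketch: resolve the inline RQ backing a given port and read its
 * queue id; a negative return means no active inline RQ for the port.
 */
static int
inl_rq_qid(struct roc_nix *roc_nix)
{
	struct roc_nix_rq *inl_rq = roc_nix_inl_dev_rq(roc_nix);

	return inl_rq ? inl_rq->qid : -1;
}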
@@ -1025,6 +1024,7 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
void *sa, *sa_base = NULL;
struct nix *nix = NULL;
uint16_t max_spi = 0;
uint32_t rq_refs = 0;
uint8_t pkind = 0;
int i;
@@ -1047,7 +1047,10 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
}
if (inl_dev) {
if (inl_dev->rq_refs == 0) {
for (i = 0; i < inl_dev->nb_rqs; i++)
rq_refs += inl_dev->rqs[i].inl_dev_refs;
if (rq_refs == 0) {
inl_dev->ts_ena = ts_ena;
max_spi = inl_dev->ipsec_in_max_spi;
sa_base = inl_dev->inb_sa_base;


@@ -168,12 +168,11 @@ void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq);
int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(void);
struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
int __roc_api roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix,
uint32_t tag_const, uint8_t tt);
uint64_t __roc_api roc_nix_inl_dev_rq_limit_get(void);
int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
uint16_t max_frags);
int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
bool inb_inl_dev);


@@ -334,6 +334,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
struct nix_lf_alloc_rsp *rsp;
struct nix_lf_alloc_req *req;
struct nix_hw_info *hw_info;
struct roc_nix_rq *rqs;
uint64_t max_sa, i;
size_t inb_sa_sz;
int rc = -ENOSPC;
@@ -345,7 +346,8 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
req = mbox_alloc_msg_nix_lf_alloc(mbox);
if (req == NULL)
return rc;
req->rq_cnt = 1;
/* We will have per-port RQ if it is not with channel masking */
req->rq_cnt = inl_dev->nb_rqs;
req->sq_cnt = 1;
req->cq_cnt = 1;
/* XQESZ is W16 */
@@ -421,6 +423,14 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
goto free_mem;
}
/* Allocate memory for RQ's */
rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
if (!rqs) {
plt_err("Failed to allocate memory for RQ's");
goto free_mem;
}
inl_dev->rqs = rqs;
return 0;
free_mem:
plt_free(inl_dev->inb_sa_base);
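Worth noting in the allocation above: the LF alloc request asks the AF for exactly nb_rqs hardware RQs, while the host-side state array is always sized for PLT_MAX_ETHPORTS, so rqs[port_id] stays a valid index in either mode and unused entries keep inl_dev_refs at zero. A hedged sanity-check sketch of that invariant:

#include <assert.h>

/* Hypothetical check mirroring the sizing established above. */
static void
inl_dev_rqs_check(const struct nix_inl_dev *inl_dev)
{
	/* 1 RQ with channel masking, else one per possible ethdev port */
	assert(inl_dev->nb_rqs == 1 || inl_dev->nb_rqs == PLT_MAX_ETHPORTS);
	assert(inl_dev->rqs != NULL); /* sized for PLT_MAX_ETHPORTS entries */
}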
@@ -464,7 +474,15 @@ nix_inl_nix_release(struct nix_inl_dev *inl_dev)
if (req == NULL)
return -ENOSPC;
return mbox_process(mbox);
rc = mbox_process(mbox);
if (rc)
return rc;
plt_free(inl_dev->rqs);
plt_free(inl_dev->inb_sa_base);
inl_dev->rqs = NULL;
inl_dev->inb_sa_base = NULL;
return 0;
}
static int
@@ -584,10 +602,13 @@ roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
no_pool:
/* Disable RQ if enabled */
if (inl_dev->rq_refs) {
rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, false);
for (i = 0; i < inl_dev->nb_rqs; i++) {
if (!inl_dev->rqs[i].inl_dev_refs)
continue;
rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
if (rc) {
plt_err("Failed to disable inline dev RQ, rc=%d", rc);
plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
rc);
return rc;
}
}
@@ -633,10 +654,14 @@ roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
exit:
/* Renable RQ */
if (inl_dev->rq_refs) {
rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, true);
for (i = 0; i < inl_dev->nb_rqs; i++) {
if (!inl_dev->rqs[i].inl_dev_refs)
continue;
rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
if (rc)
plt_err("Failed to enable inline dev RQ, rc=%d", rc);
plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
rc);
}
return rc;
@@ -815,6 +840,7 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
if (roc_inl_dev->spb_drop_pc)
inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;


@@ -179,50 +179,59 @@ nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev)
static void
nix_inl_nix_q_irq(void *param)
{
struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
struct nix_inl_qint *qints_mem = (struct nix_inl_qint *)param;
struct nix_inl_dev *inl_dev = qints_mem->inl_dev;
uintptr_t nix_base = inl_dev->nix_base;
struct dev *dev = &inl_dev->dev;
uint16_t qint = qints_mem->qint;
volatile void *ctx;
uint64_t reg, intr;
uint64_t wdata;
uint8_t irq;
int rc;
int rc, q;
intr = plt_read64(nix_base + NIX_LF_QINTX_INT(0));
intr = plt_read64(nix_base + NIX_LF_QINTX_INT(qint));
if (intr == 0)
return;
plt_err("Queue_intr=0x%" PRIx64 " qintx 0 pf=%d, vf=%d", intr, dev->pf,
dev->vf);
/* Get and clear RQ0 interrupt */
reg = roc_atomic64_add_nosync(0,
(int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
if (reg & BIT_ULL(42) /* OP_ERR */) {
plt_err("Failed to get rq_int");
return;
/* Handle RQ interrupts */
for (q = 0; q < inl_dev->nb_rqs; q++) {
/* Get and clear RQ interrupts */
wdata = (uint64_t)q << 44;
reg = roc_atomic64_add_nosync(wdata,
(int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
if (reg & BIT_ULL(42) /* OP_ERR */) {
plt_err("Failed to get rq_int");
return;
}
irq = reg & 0xff;
plt_write64(wdata | irq, nix_base + NIX_LF_RQ_OP_INT);
if (irq & BIT_ULL(NIX_RQINT_DROP))
plt_err("RQ=0 NIX_RQINT_DROP");
if (irq & BIT_ULL(NIX_RQINT_RED))
plt_err("RQ=0 NIX_RQINT_RED");
}
irq = reg & 0xff;
plt_write64(0 | irq, nix_base + NIX_LF_RQ_OP_INT);
if (irq & BIT_ULL(NIX_RQINT_DROP))
plt_err("RQ=0 NIX_RQINT_DROP");
if (irq & BIT_ULL(NIX_RQINT_RED))
plt_err("RQ=0 NIX_RQINT_RED");
/* Clear interrupt */
plt_write64(intr, nix_base + NIX_LF_QINTX_INT(0));
plt_write64(intr, nix_base + NIX_LF_QINTX_INT(qint));
/* Dump registers to std out */
nix_inl_nix_reg_dump(inl_dev);
/* Dump RQ 0 */
rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
if (rc) {
plt_err("Failed to get rq context");
return;
/* Dump RQs */
for (q = 0; q < inl_dev->nb_rqs; q++) {
rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
if (rc) {
plt_err("Failed to get rq %d context, rc=%d", q, rc);
continue;
}
nix_lf_rq_dump(ctx);
}
nix_lf_rq_dump(ctx);
}
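The loop above reads one RQ's interrupt state at a time through NIX_LF_RQ_OP_INT: the RQ index is encoded from bit 44 upward in the op word, OP_ERR comes back in bit 42, and the pending interrupt bits sit in the low byte (the same layout the handler writes back to clear them). A commented sketch of that encoding, with the bit positions taken from the handler:

#include <stdint.h>

#define RQ_OP_QSEL_SHIFT 44      /* RQ index, upper bits of the op word */
#define RQ_OP_ERR_BIT    42      /* OP_ERR flag in the returned value */
#define RQ_OP_IRQ_MASK   0xffull /* pending interrupt bits, low byte */

/* Sketch: build the op word that selects RQ 'q' for NIX_LF_RQ_OP_INT. */
static inline uint64_t
rq_op_int_word(uint16_t q)
{
	return (uint64_t)q << RQ_OP_QSEL_SHIFT;
}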
static void
@@ -233,7 +242,7 @@ nix_inl_nix_ras_irq(void *param)
struct dev *dev = &inl_dev->dev;
volatile void *ctx;
uint64_t intr;
int rc;
int rc, q;
intr = plt_read64(nix_base + NIX_LF_RAS);
if (intr == 0)
@@ -246,13 +255,15 @@ nix_inl_nix_ras_irq(void *param)
/* Dump registers to std out */
nix_inl_nix_reg_dump(inl_dev);
/* Dump RQ 0 */
rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
if (rc) {
plt_err("Failed to get rq context");
return;
/* Dump RQs */
for (q = 0; q < inl_dev->nb_rqs; q++) {
rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
if (rc) {
plt_err("Failed to get rq %d context, rc=%d", q, rc);
continue;
}
nix_lf_rq_dump(ctx);
}
nix_lf_rq_dump(ctx);
}
static void
@@ -263,7 +274,7 @@ nix_inl_nix_err_irq(void *param)
struct dev *dev = &inl_dev->dev;
volatile void *ctx;
uint64_t intr;
int rc;
int rc, q;
intr = plt_read64(nix_base + NIX_LF_ERR_INT);
if (intr == 0)
@@ -277,13 +288,15 @@ nix_inl_nix_err_irq(void *param)
/* Dump registers to std out */
nix_inl_nix_reg_dump(inl_dev);
/* Dump RQ 0 */
rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
if (rc) {
plt_err("Failed to get rq context");
return;
/* Dump RQs */
for (q = 0; q < inl_dev->nb_rqs; q++) {
rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
if (rc) {
plt_err("Failed to get rq %d context, rc=%d", q, rc);
continue;
}
nix_lf_rq_dump(ctx);
}
nix_lf_rq_dump(ctx);
}
int
@@ -291,8 +304,10 @@ nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
{
struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
uintptr_t nix_base = inl_dev->nix_base;
struct nix_inl_qint *qints_mem;
int rc, q, ret = 0;
uint16_t msixoff;
int rc;
int qints;
msixoff = inl_dev->nix_msixoff;
if (msixoff == MSIX_VECTOR_INVALID) {
@@ -317,21 +332,38 @@ nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
/* Enable RAS interrupts */
plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1S);
/* Setup queue irq for RQ 0 */
/* Setup queue irq for RQ's */
qints = PLT_MIN(inl_dev->nb_rqs, inl_dev->qints);
qints_mem = plt_zmalloc(sizeof(struct nix_inl_qint) * qints, 0);
if (!qints_mem) {
plt_err("Failed to allocate memory for %u qints", qints);
return -ENOMEM;
}
/* Clear QINT CNT, interrupt */
plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));
inl_dev->configured_qints = qints;
inl_dev->qints_mem = qints_mem;
/* Register queue irq vector */
rc |= dev_irq_register(handle, nix_inl_nix_q_irq, inl_dev,
msixoff + NIX_LF_INT_VEC_QINT_START);
for (q = 0; q < qints; q++) {
/* Clear QINT CNT, interrupt */
plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));
/* Enable QINT interrupt */
plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(0));
/* Register queue irq vector */
ret = dev_irq_register(handle, nix_inl_nix_q_irq, &qints_mem[q],
msixoff + NIX_LF_INT_VEC_QINT_START + q);
if (ret)
break;
plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
/* Enable QINT interrupt */
plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(q));
qints_mem[q].inl_dev = inl_dev;
qints_mem[q].qint = q;
}
rc |= ret;
return rc;
}
@@ -339,8 +371,10 @@ void
nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
{
struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
struct nix_inl_qint *qints_mem = inl_dev->qints_mem;
uintptr_t nix_base = inl_dev->nix_base;
uint16_t msixoff;
int q;
msixoff = inl_dev->nix_msixoff;
/* Disable err interrupts */
@@ -353,14 +387,19 @@ nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
dev_irq_unregister(handle, nix_inl_nix_ras_irq, inl_dev,
msixoff + NIX_LF_INT_VEC_POISON);
/* Clear QINT CNT */
plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));
for (q = 0; q < inl_dev->configured_qints; q++) {
/* Clear QINT CNT */
plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
/* Disable QINT interrupt */
plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));
/* Disable QINT interrupt */
plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
/* Unregister queue irq vector */
dev_irq_unregister(handle, nix_inl_nix_q_irq, inl_dev,
msixoff + NIX_LF_INT_VEC_QINT_START);
/* Unregister queue irq vector */
dev_irq_unregister(handle, nix_inl_nix_q_irq, &qints_mem[q],
msixoff + NIX_LF_INT_VEC_QINT_START + q);
}
plt_free(inl_dev->qints_mem);
inl_dev->qints_mem = NULL;
}


@@ -6,6 +6,12 @@
#include <pthread.h>
#include <sys/types.h>
struct nix_inl_dev;
struct nix_inl_qint {
struct nix_inl_dev *inl_dev;
uint16_t qint;
};
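The handler previously received the nix_inl_dev pointer directly and hardcoded queue 0; with one vector per QINT, each registration instead passes its own nix_inl_qint record so the shared handler can recover both the device and the queue it serves. A minimal sketch of that recovery (the handler name is illustrative):

/* Sketch: per-vector context lets one handler serve many QINTs. */
static void
q_irq_sketch(void *param)
{
	struct nix_inl_qint *qint_ctx = param;   /* &qints_mem[q] */
	struct nix_inl_dev *inl_dev = qint_ctx->inl_dev;
	uint16_t qint = qint_ctx->qint;          /* no longer fixed at 0 */

	(void)inl_dev;
	(void)qint;
}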
struct nix_inl_dev {
/* Base device object */
struct dev dev;
@@ -42,8 +48,10 @@ struct nix_inl_dev {
uint16_t vwqe_interval;
uint16_t cints;
uint16_t qints;
struct roc_nix_rq rq;
uint16_t rq_refs;
uint16_t configured_qints;
struct roc_nix_rq *rqs;
struct nix_inl_qint *qints_mem;
uint16_t nb_rqs;
bool is_nix1;
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;


@@ -350,6 +350,7 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
uint8_t has_msns_act = 0;
int sel_act, req_act = 0;
uint16_t pf_func, vf_id;
struct roc_nix *roc_nix;
int errcode = 0;
int mark = 0;
int rq = 0;
@@ -436,11 +437,19 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
*/
req_act |= ROC_NPC_ACTION_TYPE_SEC;
rq = 0;
roc_nix = roc_npc->roc_nix;
/* Special processing when with inline device */
if (roc_nix_inb_is_with_inl_dev(roc_npc->roc_nix) &&
if (roc_nix_inb_is_with_inl_dev(roc_nix) &&
roc_nix_inl_dev_is_probed()) {
rq = 0;
struct roc_nix_rq *inl_rq;
inl_rq = roc_nix_inl_dev_rq(roc_nix);
if (!inl_rq) {
errcode = NPC_ERR_INTERNAL;
goto err_exit;
}
rq = inl_rq->qid;
pf_func = nix_inl_dev_pffunc_get();
}
rc = npc_parse_msns_action(roc_npc, actions, flow,

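For the security action this means the flow rule can no longer hardcode RQ 0 when traffic lands on the inline device; it must steer to the port's inline RQ and the inline device's pf_func. A condensed sketch of the selection implemented above (the helper name is illustrative):

/* Condensed sketch of the RQ/pf_func choice for the SEC action. */
static int
sec_action_rq(struct roc_npc *roc_npc, int *rq, uint16_t *pf_func)
{
	struct roc_nix *roc_nix = roc_npc->roc_nix;

	*rq = 0; /* default: ethdev's own RQ 0 */
	if (roc_nix_inb_is_with_inl_dev(roc_nix) &&
	    roc_nix_inl_dev_is_probed()) {
		struct roc_nix_rq *inl_rq = roc_nix_inl_dev_rq(roc_nix);

		if (!inl_rq)
			return -1; /* port holds no inline RQ reference */
		*rq = inl_rq->qid;
		*pf_func = nix_inl_dev_pffunc_get();
	}
	return 0;
}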

@@ -138,7 +138,6 @@ INTERNAL {
roc_nix_inl_dev_rq;
roc_nix_inl_dev_rq_get;
roc_nix_inl_dev_rq_put;
roc_nix_inl_dev_rq_limit_get;
roc_nix_inl_dev_unlock;
roc_nix_inl_dev_xaq_realloc;
roc_nix_inl_inb_is_enabled;


@@ -546,19 +546,6 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
eth_dev->data->rx_queues[qid] = NULL;
}
/* Clamp up cq limit to size of packet pool aura for LBK
* to avoid meta packet drop as LBK does not currently support
* backpressure.
*/
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
/* Use current RQ's aura limit if inl rq is not available */
if (!pkt_pool_limit)
pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
}
/* Its a no-op when inline device is not used */
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
@@ -1675,6 +1662,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
/* Initialize base roc nix */
nix->pci_dev = pci_dev;
nix->hw_vlan_ins = true;
nix->port_id = eth_dev->data->port_id;
rc = roc_nix_dev_init(nix);
if (rc) {
plt_err("Failed to initialize roc nix rc=%d", rc);
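Finally, the per-port lookups only work because the ethdev layer now records its port id in the roc_nix before the ROC device init; every inline RQ call above indexes the array through rq->roc_nix->port_id. A sketch of the ordering this hunk establishes (the wrapper name is illustrative):

/* Sketch: port_id must be set before any inline-device RQ call. */
static int
nix_init(struct roc_nix *nix, struct rte_eth_dev *eth_dev)
{
	nix->port_id = eth_dev->data->port_id; /* before roc_nix_dev_init() */
	return roc_nix_dev_init(nix);
}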