crypto/octeontx2: support CN98xx

The CN98xx SoC comes with two CPT blocks, compared to the single block
on CN96xx and CN93xx, to achieve higher performance.

Add support to allocate all LFs of a VF with an even BDF from CPT0 and
all LFs of a VF with an odd BDF from CPT1. If LFs are not available in
one block, they are allocated from the alternate block.

Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Acked-by: Anoob Joseph <anoobj@marvell.com>
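
The resulting block-selection policy is summarized by the sketch below. It is illustrative only: the helper names, the free-LF counts, and the standalone main() are invented for the example; the real driver requests LFs from the AF over the mailbox, as the diffs that follow show.

/* Illustrative sketch of the CN98xx CPT block-selection policy described
 * above. All names and numbers here are made up for the example; the real
 * driver performs the attach through the AF mailbox (see
 * otx2_cpt_queues_attach in the diff below).
 */
#include <stdint.h>
#include <stdio.h>

#define CPT0_BLKADDR 0xa                /* RVU_BLOCK_ADDR_CPT0 */
#define CPT1_BLKADDR 0xb                /* RVU_BLOCK_ADDR_CPT1 */
#define NUM_BLKS     2

static int blkaddr_tbl[NUM_BLKS] = { CPT0_BLKADDR, CPT1_BLKADDR };
static int free_lfs[NUM_BLKS]    = { 1, 64 };   /* hypothetical free LFs per block */

/* Attach nb_lfs from block index blk; return 0 on success, -1 if exhausted. */
static int attach_from_blk(int blk, int nb_lfs)
{
        if (free_lfs[blk] < nb_lfs)
                return -1;
        free_lfs[blk] -= nb_lfs;
        return 0;
}

/* Even PF_FUNC prefers CPT0, odd prefers CPT1; on CN98xx the alternate
 * block is tried when the preferred one has no free LFs.
 */
static int pick_blkaddr(uint16_t pf_func, int nb_lfs, int is_cn98xx)
{
        int blk = (is_cn98xx && (pf_func & 0x1)) ? 1 : 0;

        if (attach_from_blk(blk, nb_lfs) == 0)
                return blkaddr_tbl[blk];

        blk = (blk + 1) % NUM_BLKS;
        if (is_cn98xx && attach_from_blk(blk, nb_lfs) == 0)
                return blkaddr_tbl[blk];

        return -1;
}

int main(void)
{
        /* An even PF_FUNC asks for 8 LFs: CPT0 has only 1 free, so CPT1 is used. */
        printf("using BLKADDR 0x%x\n", pick_blkaddr(0x2, 8, 1));
        return 0;
}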
Author: Tejasree Kondoj, 2020-12-31 23:22:57 +05:30 (committed by Akhil Goyal)
Commit: bab97a3ffb (parent: 5be562bc5b)
11 changed files with 66 additions and 23 deletions


@@ -117,11 +117,15 @@ Another way to bind the VF would be to use the ``dpdk-devbind.py`` script:
 .. note::
 
-   Ensure that sufficient huge pages are available for your application::
+   * For CN98xx SoC, it is recommended to use even and odd DBDF VFs to achieve
+     higher performance as even VF uses one crypto engine and odd one uses
+     another crypto engine.
 
-      echo 8 > /sys/kernel/mm/hugepages/hugepages-524288kB/nr_hugepages
+   * Ensure that sufficient huge pages are available for your application::
 
-   Refer to :ref:`linux_gsg_hugepages` for more details.
+        echo 8 > /sys/kernel/mm/hugepages/hugepages-524288kB/nr_hugepages
+
+     Refer to :ref:`linux_gsg_hugepages` for more details.
 
 Debugging Options
 -----------------
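
Usage note on the recommendation added above: to draw engines from both CPT blocks, bind one VF with an even device function and one with an odd device function. A minimal sketch; the DBDF values below are placeholders, not taken from this patch:

./usertools/dpdk-devbind.py --bind=vfio-pci 0002:20:00.2   # even DBDF VF
./usertools/dpdk-devbind.py --bind=vfio-pci 0002:20:00.3   # odd DBDF VF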


@@ -80,6 +80,7 @@ New Features
 * Updated the OCTEON TX2 crypto PMD lookaside protocol offload for IPsec with
   ESN and anti-replay support.
+* Updated the OCTEON TX2 crypto PMD with CN98xx support.
 
 Removed Items


@@ -142,6 +142,7 @@
 #define RVU_BLOCK_ADDR_SSOW (0x8ull)
 #define RVU_BLOCK_ADDR_TIM (0x9ull)
 #define RVU_BLOCK_ADDR_CPT0 (0xaull)
+#define RVU_BLOCK_ADDR_CPT1 (0xbull)
 #define RVU_BLOCK_ADDR_NDC0 (0xcull)
 #define RVU_BLOCK_ADDR_NDC1 (0xdull)
 #define RVU_BLOCK_ADDR_NDC2 (0xeull)


@@ -13,9 +13,11 @@
 /* Marvell OCTEON TX2 Crypto PMD device name */
 #define CRYPTODEV_NAME_OCTEONTX2_PMD crypto_octeontx2
 
-#define OTX2_CPT_MAX_LFS 64
+#define OTX2_CPT_MAX_LFS 128
 #define OTX2_CPT_MAX_QUEUES_PER_VF 64
+#define OTX2_CPT_MAX_BLKS 2
 #define OTX2_CPT_PMD_VERSION 3
+#define OTX2_CPT_REVISION_ID_3 3
 
 /**
  * Device private data
@@ -29,6 +31,10 @@ struct otx2_cpt_vf {
        /**< Number of crypto queues attached */
        uint16_t lf_msixoff[OTX2_CPT_MAX_LFS];
        /**< MSI-X offsets */
+       uint8_t lf_blkaddr[OTX2_CPT_MAX_LFS];
+       /**< CPT0/1 BLKADDR of LFs */
+       uint8_t cpt_revision;
+       /**< CPT revision */
        uint8_t err_intr_registered:1;
        /**< Are error interrupts registered? */
        union cpt_eng_caps hw_caps[CPT_MAX_ENG_TYPES];


@@ -53,7 +53,7 @@ otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev)
        uint32_t i;
 
        for (i = 0; i < vf->nb_queues; i++) {
-               base = OTX2_CPT_LF_BAR2(vf, i);
+               base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[i], i);
                otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
        }
 
@@ -99,7 +99,7 @@ otx2_cpt_err_intr_register(const struct rte_cryptodev *dev)
        }
 
        for (i = 0; i < vf->nb_queues; i++) {
-               base = OTX2_CPT_LF_BAR2(vf, i);
+               base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[i], i);
                ret = otx2_cpt_lf_err_intr_register(dev, vf->lf_msixoff[i],
                                                    base);
                if (ret)
@@ -112,7 +112,7 @@ otx2_cpt_err_intr_register(const struct rte_cryptodev *dev)
 intr_unregister:
        /* Unregister the ones already registered */
        for (j = 0; j < i; j++) {
-               base = OTX2_CPT_LF_BAR2(vf, j);
+               base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[j], j);
                otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
        }
 
@@ -144,13 +144,13 @@ otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
        /* Set engine group mask and priority */
 
        ret = otx2_cpt_af_reg_read(dev, OTX2_CPT_AF_LF_CTL(qp->id),
-                                  &af_lf_ctl.u);
+                                  qp->blkaddr, &af_lf_ctl.u);
        if (ret)
                return ret;
 
        af_lf_ctl.s.grp = grp_mask;
        af_lf_ctl.s.pri = pri ? 1 : 0;
 
        ret = otx2_cpt_af_reg_write(dev, OTX2_CPT_AF_LF_CTL(qp->id),
-                                   af_lf_ctl.u);
+                                   qp->blkaddr, af_lf_ctl.u);
        if (ret)
                return ret;


@@ -44,9 +44,9 @@
 #define OTX2_CPT_AF_LF_CTL(a) (0x27000ull | (uint64_t)(a) << 3)
 #define OTX2_CPT_AF_LF_CTL2(a) (0x29000ull | (uint64_t)(a) << 3)
 
-#define OTX2_CPT_LF_BAR2(vf, q_id) \
+#define OTX2_CPT_LF_BAR2(vf, blk_addr, q_id) \
                ((vf)->otx2_dev.bar2 + \
-                ((RVU_BLOCK_ADDR_CPT0 << 20) | ((q_id) << 12)))
+                ((blk_addr << 20) | ((q_id) << 12)))
 
 #define OTX2_CPT_QUEUE_HI_PRIO 0x1


@@ -36,6 +36,7 @@ otx2_cpt_hardware_caps_get(const struct rte_cryptodev *dev,
                return -EPIPE;
        }
 
+       vf->cpt_revision = rsp->cpt_revision;
        memcpy(hw_caps, rsp->eng_caps,
               sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
 
@@ -57,7 +58,7 @@ otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
        if (ret)
                return -EIO;
 
-       *nb_queues = rsp->cpt;
+       *nb_queues = rsp->cpt + rsp->cpt1;
 
        return 0;
 }
@@ -66,20 +67,44 @@ otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues)
 {
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+       int blkaddr[OTX2_CPT_MAX_BLKS];
        struct rsrc_attach_req *req;
+       int blknum = 0;
+       int i, ret;
+
+       blkaddr[0] = RVU_BLOCK_ADDR_CPT0;
+       blkaddr[1] = RVU_BLOCK_ADDR_CPT1;
 
        /* Ask AF to attach required LFs */
        req = otx2_mbox_alloc_msg_attach_resources(mbox);
 
+       if ((vf->cpt_revision == OTX2_CPT_REVISION_ID_3) &&
+           (vf->otx2_dev.pf_func & 0x1))
+               blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
+
        /* 1 LF = 1 queue */
        req->cptlfs = nb_queues;
+       req->cpt_blkaddr = blkaddr[blknum];
 
-       if (otx2_mbox_process(mbox) < 0)
+       ret = otx2_mbox_process(mbox);
+       if (ret == -ENOSPC) {
+               if (vf->cpt_revision == OTX2_CPT_REVISION_ID_3) {
+                       blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
+                       req->cpt_blkaddr = blkaddr[blknum];
+                       if (otx2_mbox_process(mbox) < 0)
+                               return -EIO;
+               } else {
+                       return -EIO;
+               }
+       } else if (ret < 0) {
                return -EIO;
+       }
 
        /* Update number of attached queues */
        vf->nb_queues = nb_queues;
+       for (i = 0; i < nb_queues; i++)
+               vf->lf_blkaddr[i] = req->cpt_blkaddr;
 
        return 0;
 }
@@ -120,7 +145,8 @@ otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
                return ret;
 
        for (i = 0; i < vf->nb_queues; i++)
-               vf->lf_msixoff[i] = rsp->cptlf_msixoff[i];
+               vf->lf_msixoff[i] = (vf->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
+                       rsp->cpt1_lf_msixoff[i] : rsp->cptlf_msixoff[i];
 
        return 0;
 }
@@ -144,7 +170,7 @@ otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
 
 int
 otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
-                    uint64_t *val)
+                    uint8_t blkaddr, uint64_t *val)
 {
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        struct otx2_mbox *mbox = vf->otx2_dev.mbox;
@@ -166,6 +192,7 @@ otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
        msg->is_write = 0;
        msg->reg_offset = reg;
        msg->ret_val = val;
+       msg->blkaddr = blkaddr;
 
        ret = otx2_cpt_send_mbox_msg(vf);
        if (ret < 0)
@@ -182,7 +209,7 @@ otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
 
 int
 otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
-                     uint64_t val)
+                     uint8_t blkaddr, uint64_t val)
 {
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        struct otx2_mbox *mbox = vf->otx2_dev.mbox;
@@ -202,6 +229,7 @@ otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
        msg->is_write = 1;
        msg->reg_offset = reg;
        msg->val = val;
+       msg->blkaddr = blkaddr;
 
        return otx2_cpt_send_mbox_msg(vf);
 }


@@ -23,11 +23,11 @@ int otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev);
 
 __rte_internal
 int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
-                        uint64_t *val);
+                        uint8_t blkaddr, uint64_t *val);
 __rte_internal
 int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
-                         uint64_t val);
+                         uint8_t blkaddr, uint64_t val);
 
 int otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev,
                            struct otx2_cpt_qp *qp, uint16_t port_id);


@@ -242,7 +242,8 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
        qp->iq_dma_addr = iova;
        qp->id = qp_id;
-       qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
+       qp->blkaddr = vf->lf_blkaddr[qp_id];
+       qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);
 
        lmtline = vf->otx2_dev.bar2 +
                  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +


@@ -15,6 +15,8 @@
 struct otx2_cpt_qp {
        uint32_t id;
        /**< Queue pair id */
+       uint8_t blkaddr;
+       /**< CPT0/1 BLKADDR of LF */
        uintptr_t base;
        /**< Base address where BAR is mapped */
        void *lmtline;


@@ -38,13 +38,13 @@ otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
        rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
 
        ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
-                                  &af_lf_ctl2.u);
+                                  qp->blkaddr, &af_lf_ctl2.u);
        if (ret)
                return ret;
 
        af_lf_ctl2.s.sso_pf_func = otx2_sso_pf_func_get();
        ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
-                                   af_lf_ctl2.u);
+                                   qp->blkaddr, af_lf_ctl2.u);
        if (ret)
                return ret;
 
@@ -69,13 +69,13 @@ otx2_ca_qp_del(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
        memset(&qp->ev, 0, sizeof(struct rte_event));
 
        ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
-                                  &af_lf_ctl2.u);
+                                  qp->blkaddr, &af_lf_ctl2.u);
        if (ret)
                return ret;
 
        af_lf_ctl2.s.sso_pf_func = 0;
        ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
-                                   af_lf_ctl2.u);
+                                   qp->blkaddr, af_lf_ctl2.u);
 
        return ret;
 }