event/cnxk: move crypto adapter to respective file

Moved the common crypto adapter ops to the file specific to eventdev
adapters.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Commit: 8d5387a99a (parent: 235558fe94)
Author: Shijith Thotton, committed by Jerin Jacob
Date: 2022-07-27 12:45:36 +05:30
3 changed files with 118 additions and 128 deletions
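
For context, a hedged application-side sketch (not part of this commit) of how these PMD hooks are normally reached: the public rte_event_crypto_adapter API creates an adapter on the event device and adds crypto queue pairs to it, which eventually lands in the driver's queue-pair add op, here cnxk_crypto_adapter_qp_add(). The adapter/device IDs, the port configuration, and the NULL queue configuration below are assumptions, and the exact type of the last argument to the queue-pair add call differs slightly across DPDK releases.

#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>

/* Hypothetical helper: bind every queue pair of one cryptodev to an adapter. */
static int
attach_all_crypto_qps(uint8_t adapter_id, uint8_t evdev_id, uint8_t cdev_id,
                      struct rte_event_port_conf *port_conf)
{
        int ret;

        /* Create the adapter instance on the event device. */
        ret = rte_event_crypto_adapter_create(adapter_id, evdev_id, port_conf,
                                              RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
        if (ret)
                return ret;

        /*
         * queue_pair_id == -1 requests all queue pairs of the cryptodev; the
         * cnxk PMD handles that case in cnxk_crypto_adapter_qp_add(). NULL
         * lets the adapter use default event/queue configuration where the
         * device capabilities permit it.
         */
        ret = rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id, -1, NULL);
        if (ret) {
                rte_event_crypto_adapter_free(adapter_id);
                return ret;
        }

        return rte_event_crypto_adapter_start(adapter_id);
}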

File 1 of 3

@@ -2,129 +2,8 @@
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_cryptodev_ops.h"
#include "cnxk_eventdev.h"

static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
                        struct cnxk_cpt_qp *qp)
{
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t cache_size, nb_req;
        unsigned int req_size;
        uint32_t nb_desc_min;

        /*
         * Update CPT FC threshold. Decrement by hardware burst size to allow
         * simultaneous enqueue from all available cores.
         */
        if (roc_model_is_cn10k())
                nb_desc_min = rte_lcore_count() * 32;
        else
                nb_desc_min = rte_lcore_count() * 2;

        if (qp->lmtline.fc_thresh < nb_desc_min) {
                plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
                        rte_lcore_count());
                return -ENOSPC;
        }

        qp->lmtline.fc_thresh -= nb_desc_min;

        snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
                 cdev->data->dev_id, qp->lf.lf_id);
        req_size = sizeof(struct cpt_inflight_req);
        cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
        nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
        qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
                                           0, NULL, NULL, NULL, NULL,
                                           rte_socket_id(), 0);
        if (qp->ca.req_mp == NULL)
                return -ENOMEM;

        qp->ca.enabled = true;

        return 0;
}

int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
        uint32_t adptr_xae_cnt = 0;
        struct cnxk_cpt_qp *qp;
        int ret;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        ret = crypto_adapter_qp_setup(cdev, qp);
                        if (ret) {
                                cnxk_crypto_adapter_qp_del(cdev, -1);
                                return ret;
                        }
                        adptr_xae_cnt += qp->ca.req_mp->size;
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                ret = crypto_adapter_qp_setup(cdev, qp);
                if (ret)
                        return ret;
                adptr_xae_cnt = qp->ca.req_mp->size;
        }

        /* Update crypto adapter XAE count */
        sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
        cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
        int ret;

        rte_mempool_free(qp->ca.req_mp);
        qp->ca.enabled = false;

        ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
        if (ret < 0) {
                plt_err("Could not reset lmtline for queue pair %d",
                        qp->lf.lf_id);
                return ret;
        }

        return 0;
}

int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_cpt_qp *qp;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        if (qp->ca.enabled)
                                crypto_adapter_qp_free(qp);
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                if (qp->ca.enabled)
                        crypto_adapter_qp_free(qp);
        }

        return 0;
}

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)

File 2 of 3

@@ -290,13 +290,6 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
                          int16_t queue_port_id, const uint32_t ids[],
                          uint32_t n);

/* Crypto adapter APIs. */
int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                               const struct rte_cryptodev *cdev,
                               int32_t queue_pair_id);
int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
                               int32_t queue_pair_id);

/* CN9K */
void cn9k_sso_set_rsrc(void *arg);

@@ -321,5 +314,8 @@ int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                               const struct rte_cryptodev *cdev, int32_t queue_pair_id);
int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);

#endif /* __CNXK_EVENTDEV_H__ */

File 3 of 3

@@ -2,6 +2,7 @@
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_cryptodev_ops.h"
#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"

@@ -638,3 +639,117 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
        return 0;
}

static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
{
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t cache_size, nb_req;
        unsigned int req_size;
        uint32_t nb_desc_min;

        /*
         * Update CPT FC threshold. Decrement by hardware burst size to allow
         * simultaneous enqueue from all available cores.
         */
        if (roc_model_is_cn10k())
                nb_desc_min = rte_lcore_count() * 32;
        else
                nb_desc_min = rte_lcore_count() * 2;

        if (qp->lmtline.fc_thresh < nb_desc_min) {
                plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
                        rte_lcore_count());
                return -ENOSPC;
        }

        qp->lmtline.fc_thresh -= nb_desc_min;

        snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u", cdev->data->dev_id, qp->lf.lf_id);
        req_size = sizeof(struct cpt_inflight_req);
        cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
        nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
        qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0, NULL, NULL, NULL,
                                           NULL, rte_socket_id(), 0);
        if (qp->ca.req_mp == NULL)
                return -ENOMEM;

        qp->ca.enabled = true;

        return 0;
}

int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
        uint32_t adptr_xae_cnt = 0;
        struct cnxk_cpt_qp *qp;
        int ret;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        ret = crypto_adapter_qp_setup(cdev, qp);
                        if (ret) {
                                cnxk_crypto_adapter_qp_del(cdev, -1);
                                return ret;
                        }
                        adptr_xae_cnt += qp->ca.req_mp->size;
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                ret = crypto_adapter_qp_setup(cdev, qp);
                if (ret)
                        return ret;
                adptr_xae_cnt = qp->ca.req_mp->size;
        }

        /* Update crypto adapter XAE count */
        sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
        cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
        int ret;

        rte_mempool_free(qp->ca.req_mp);
        qp->ca.enabled = false;

        ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
        if (ret < 0) {
                plt_err("Could not reset lmtline for queue pair %d", qp->lf.lf_id);
                return ret;
        }

        return 0;
}

int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
{
        struct cnxk_cpt_qp *qp;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        if (qp->ca.enabled)
                                crypto_adapter_qp_free(qp);
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                if (qp->ca.enabled)
                        crypto_adapter_qp_free(qp);
        }

        return 0;
}
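
To make the queue-pair setup arithmetic above concrete, here is a small standalone sketch of the same flow-control and mempool sizing math, using made-up example values for the CPT queue depth and lcore count. The CACHE_MAX define stands in for RTE_MEMPOOL_CACHE_MAX_SIZE (512 in current DPDK headers), and rte_mempool_create() itself requires cache_size <= n / 1.5, which is where the 1.5 divisor comes from.

#include <stdint.h>
#include <stdio.h>

#define CACHE_MAX 512u /* stand-in for RTE_MEMPOOL_CACHE_MAX_SIZE */

int
main(void)
{
        uint32_t nb_desc = 2048; /* example CPT queue depth (qp->lf.nb_desc) */
        uint32_t lcores = 24;    /* example rte_lcore_count() */
        uint32_t cache_size, nb_req, fc_margin_cn10k, fc_margin_cn9k;

        /* Per-lcore cache bounded by the mempool API limit and n / 1.5. */
        cache_size = CACHE_MAX < (uint32_t)(nb_desc / 1.5) ?
                     CACHE_MAX : (uint32_t)(nb_desc / 1.5);
        /* Enough objects for a full queue even if every lcore cache is populated. */
        nb_req = nb_desc > cache_size * lcores ? nb_desc : cache_size * lcores;

        /* FC threshold headroom reserved per core (burst of 32 on cn10k, 2 on cn9k). */
        fc_margin_cn10k = lcores * 32;
        fc_margin_cn9k = lcores * 2;

        printf("cache_size=%u nb_req=%u fc_margin cn10k=%u cn9k=%u\n",
               cache_size, nb_req, fc_margin_cn10k, fc_margin_cn9k);
        /* Prints: cache_size=512 nb_req=12288 fc_margin cn10k=768 cn9k=48 */
        return 0;
}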