event/cnxk: add crypto adapter operations

Added eventdev ops required to initialize crypto adapter.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
This commit is contained in:
Shijith Thotton 2021-09-02 20:11:52 +05:30 committed by Akhil Goyal
parent 044bb99d6f
commit 19f81cb59c
6 changed files with 213 additions and 1 deletions

View File

@@ -55,6 +55,13 @@ struct pending_queue {
uint64_t time_out;
};
/* Per-queue-pair crypto adapter state embedded in struct cnxk_cpt_qp.
 * NOTE(review): "adpter" is a typo for "adapter", kept as-is because the
 * struct name is referenced from other files.
 */
struct crypto_adpter_info {
	bool enabled;
	/**< Set if queue pair is added to crypto adapter */
	struct rte_mempool *req_mp;
	/**< CPT inflight request mempool */
};
struct cnxk_cpt_qp {
struct roc_cpt_lf lf;
/**< Crypto LF */
@@ -68,6 +75,8 @@ struct cnxk_cpt_qp {
/**< Metabuf info required to support operations on the queue pair */
struct roc_cpt_lmtline lmtline;
/**< Lmtline information */
struct crypto_adpter_info ca;
/**< Crypto adapter related info */
};
int cnxk_cpt_dev_config(struct rte_cryptodev *dev,

View File

@@ -773,6 +773,48 @@ cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
return cn10k_sso_updt_tx_adptr_data(event_dev);
}
/* Report event-crypto adapter capabilities for a cn10k eventdev/cryptodev
 * pair.  Returns -EINVAL (via the validation macro) unless both devices
 * belong to the cn10k PMDs; otherwise reports no capability flags.
 */
static int
cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
			      const struct rte_cryptodev *cdev, uint32_t *caps)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	/* No optional adapter capability flags are advertised. */
	*caps = 0;

	return 0;
}
/* Attach a crypto queue pair (or all of them, when queue_pair_id is -1)
 * to the event device's crypto adapter.  The event argument is unused;
 * event information is generated internally by the CPT hardware.
 */
static int
cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id,
			    const struct rte_event *event)
{
	struct cnxk_sso_evdev *sso_dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(event);

	/* Both devices must be cn10k PMD instances. */
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	/* Adapter enqueues through the SSO internal port; refresh the
	 * fast-path function pointers to match.
	 */
	sso_dev->is_ca_internal_port = 1;
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}
/* Detach a crypto queue pair from the adapter; queue_pair_id of -1
 * detaches every queue pair of the cryptodev.  Validates that both
 * devices are cn10k PMD instances before delegating to the common code.
 */
static int
cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
static struct rte_eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
@@ -802,6 +844,10 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
.timer_adapter_caps_get = cnxk_tim_caps_get,
.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
.dump = cnxk_sso_dump,
.dev_start = cn10k_sso_start,
.dev_stop = cn10k_sso_stop,

View File

@@ -923,6 +923,47 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
return cn9k_sso_updt_tx_adptr_data(event_dev);
}
/* Report event-crypto adapter capabilities for a cn9k eventdev/cryptodev
 * pair.  Returns -EINVAL (via the validation macro) unless both devices
 * belong to the cn9k PMDs; otherwise reports no capability flags.
 */
static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
			     const struct rte_cryptodev *cdev, uint32_t *caps)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	/* No optional adapter capability flags are advertised. */
	*caps = 0;

	return 0;
}
/* Attach a crypto queue pair (or all of them, when queue_pair_id is -1)
 * to the event device's crypto adapter.  The event argument is unused;
 * event information is generated internally by the CPT hardware.
 */
static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id, const struct rte_event *event)
{
	struct cnxk_sso_evdev *sso_dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(event);

	/* Both devices must be cn9k PMD instances. */
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	/* Adapter enqueues through the SSO internal port; refresh the
	 * fast-path function pointers to match.
	 */
	sso_dev->is_ca_internal_port = 1;
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}
/* Detach a crypto queue pair from the adapter; queue_pair_id of -1
 * detaches every queue pair of the cryptodev.  Validates that both
 * devices are cn9k PMD instances before delegating to the common code.
 */
static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
.dev_infos_get = cn9k_sso_info_get,
.dev_configure = cn9k_sso_dev_configure,
@@ -948,6 +989,10 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {
.timer_adapter_caps_get = cnxk_tim_caps_get,
.crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
.dump = cnxk_sso_dump,
.dev_start = cn9k_sso_start,
.dev_stop = cn9k_sso_stop,

View File

@@ -2,8 +2,102 @@
* Copyright(C) 2021 Marvell.
*/
#include "cnxk_cryptodev_ops.h"
#include "cnxk_eventdev.h"
/* Create the inflight-request mempool for a queue pair and mark the
 * queue pair as attached to the crypto adapter.
 *
 * Returns 0 on success, -ENOMEM if the mempool cannot be created.
 * NOTE(review): attaching the same qp twice fails here (duplicate
 * mempool name) rather than leaking the first pool — confirm callers
 * never double-attach.
 */
static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
			struct cnxk_cpt_qp *qp)
{
	char name[RTE_MEMPOOL_NAMESIZE];
	uint32_t cache_size, nb_req;
	unsigned int req_size;

	/* Mempool name is unique per (cryptodev, CPT LF) pair. */
	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
		 cdev->data->dev_id, qp->lf.lf_id);

	req_size = sizeof(struct cpt_inflight_req);

	/* Per-lcore cache sized to ~2/3 of the descriptor count, capped at
	 * the mempool cache limit.  NOTE(review): the 1.5 divisor makes
	 * this a floating-point expression (truncated on assignment) —
	 * confirm that is intended inside RTE_MIN.
	 */
	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
	/* Provision enough objects so every lcore cache can be filled. */
	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());

	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
					   0, NULL, NULL, NULL, NULL,
					   rte_socket_id(), 0);
	if (qp->ca.req_mp == NULL)
		return -ENOMEM;

	qp->ca.enabled = true;

	return 0;
}
/* Attach queue pair(s) of @cdev to the crypto adapter: set up the
 * inflight-request mempool for each queue pair and grow the SSO XAE
 * count accordingly.  A queue_pair_id of -1 attaches every configured
 * queue pair; on partial failure all queue pairs are detached again.
 *
 * Returns 0 on success, -EINVAL for an unconfigured queue pair, or a
 * negative errno from crypto_adapter_qp_setup().
 */
int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
	uint32_t adptr_xae_cnt = 0;
	struct cnxk_cpt_qp *qp;
	int ret;

	if (queue_pair_id == -1) {
		uint16_t qp_id;

		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			/* Fix: slots for unconfigured queue pairs are NULL;
			 * skip them instead of dereferencing.
			 */
			if (qp == NULL)
				continue;
			ret = crypto_adapter_qp_setup(cdev, qp);
			if (ret) {
				/* Roll back queue pairs set up so far. */
				cnxk_crypto_adapter_qp_del(cdev, -1);
				return ret;
			}
			adptr_xae_cnt += qp->ca.req_mp->size;
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		/* Fix: reject an unconfigured queue pair instead of
		 * crashing on a NULL dereference.
		 */
		if (qp == NULL)
			return -EINVAL;
		ret = crypto_adapter_qp_setup(cdev, qp);
		if (ret)
			return ret;
		adptr_xae_cnt = qp->ca.req_mp->size;
	}

	/* Update crypto adapter XAE count */
	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}
/* Release a queue pair's inflight-request mempool and mark it as no
 * longer attached to the crypto adapter.  Always returns 0.
 */
static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
	rte_mempool_free(qp->ca.req_mp);
	qp->ca.enabled = false;

	return 0;
}
/* Detach queue pair(s) of @cdev from the crypto adapter, releasing
 * their inflight-request mempools.  A queue_pair_id of -1 detaches
 * every attached queue pair.  Always returns 0; unattached or
 * unconfigured queue pairs are ignored.
 */
int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	struct cnxk_cpt_qp *qp;

	if (queue_pair_id == -1) {
		uint16_t qp_id;

		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			/* Fix: slots for unconfigured queue pairs are NULL;
			 * guard before reading qp->ca.enabled.
			 */
			if (qp != NULL && qp->ca.enabled)
				crypto_adapter_qp_free(qp);
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		/* Fix: same NULL guard for the single-queue-pair path. */
		if (qp != NULL && qp->ca.enabled)
			crypto_adapter_qp_free(qp);
	}

	return 0;
}
void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
struct rte_event_dev_info *dev_info)

View File

@@ -5,6 +5,9 @@
#ifndef __CNXK_EVENTDEV_H__
#define __CNXK_EVENTDEV_H__
#include <string.h>
#include <rte_cryptodev.h>
#include <rte_devargs.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
@@ -51,6 +54,12 @@
#define CN10K_GW_MODE_PREF 1
#define CN10K_GW_MODE_PREF_WFE 2
/* Return -EINVAL from the *enclosing* function unless @dev's driver name
 * starts with @drv_name (prefix match: strncmp bounded by strlen(drv_name)).
 * Used by the adapter ops to reject non-cnxk event/crypto devices.
 */
#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name)                               \
	do {                                                                   \
		if (strncmp(dev->driver->name, drv_name, strlen(drv_name)))    \
			return -EINVAL;                                        \
	} while (0)
typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id);
typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t *grp_base);
typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws);
@@ -108,6 +117,8 @@ struct cnxk_sso_evdev {
uint8_t dual_ws;
/* CN10K */
uint8_t gw_mode;
/* Crypto adapter */
uint8_t is_ca_internal_port;
} __rte_cache_aligned;
struct cn10k_sso_hws {
@@ -266,6 +277,13 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
int16_t queue_port_id, const uint32_t ids[],
uint32_t n);
/* Crypto adapter APIs. */
int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
const struct rte_cryptodev *cdev,
int32_t queue_pair_id);
int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
int32_t queue_pair_id);
/* CN9K */
void cn9k_sso_set_rsrc(void *arg);

View File

@@ -43,4 +43,4 @@ foreach flag: extra_flags
endif
endforeach
deps += ['bus_pci', 'common_cnxk', 'net_cnxk']
deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk']