crypto/octeontx: add crypto adapter data path

Added support for crypto adapter OP_FORWARD mode.

Because OcteonTx CPT crypto completions can arrive out of order, each
crypto op is enqueued to CPT, dequeued from CPT and enqueued to SSO one
by one.
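
As a point of reference, a minimal application-side sketch of driving this
path in OP_FORWARD mode follows. It is only a sketch: the event/crypto device
IDs, queue setup, the symmetric session and the crypto op are assumed to be
created elsewhere, the session mempool is assumed to reserve room for the
adapter metadata as user data, and the response event fields are illustrative
rather than part of this commit.

#include <string.h>
#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

/*
 * Hedged sketch: forward one prepared crypto op through an event port so the
 * PMD enqueues it to CPT and the completion comes back as an SSO event.
 * evdev_id/port_id/cdev_id/qp_id/sess/op are assumed to be set up already.
 */
static int
forward_op(uint8_t evdev_id, uint8_t port_id, uint8_t cdev_id, uint16_t qp_id,
	   struct rte_cryptodev_sym_session *sess, struct rte_crypto_op *op)
{
	union rte_event_crypto_metadata m;
	struct rte_event ev;

	memset(&m, 0, sizeof(m));
	/* Which cryptodev/queue pair the op should be submitted to. */
	m.request_info.cdev_id = cdev_id;
	m.request_info.queue_pair_id = qp_id;
	/* How the completion event should be scheduled back to the app. */
	m.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
	m.response_info.queue_id = 0;	/* assumed completion event queue */
	m.response_info.flow_id = 0;

	/* Session-based ops carry the adapter metadata as session user data. */
	if (rte_cryptodev_sym_session_set_user_data(sess, &m, sizeof(m)) < 0)
		return -1;

	memset(&ev, 0, sizeof(ev));
	ev.event_ptr = op;
	ev.event_type = RTE_EVENT_TYPE_CRYPTODEV;

	/* OP_FORWARD: the op enters CPT via the event port (dev->ca_enqueue). */
	return rte_event_crypto_adapter_enqueue(evdev_id, port_id, &ev, 1) == 1
	       ? 0 : -1;
}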

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Author: Shijith Thotton, 2021-06-24 02:23:50 +05:30 (committed by Akhil Goyal)
Commit: 44a2cebbd4 (parent: 8dc6c2f12e)
10 changed files with 255 additions and 86 deletions


@ -92,6 +92,10 @@ New Features
* Added support for lookaside protocol (IPsec) offload in cn10k PMD.
* Added support for asymmetric crypto operations in cn9k and cn10k PMD.
* **Updated Marvell OCTEON TX crypto PMD.**
Added support for crypto adapter OP_FORWARD mode.
* **Added Baseband PHY CNXK PMD.**
Added Baseband PHY PMD which allows to configure BPHY hardware block


@ -54,7 +54,7 @@ struct cpt_request_info {
uint64_t ei2;
} ist;
uint8_t *rptr;
const struct otx2_cpt_qp *qp;
const void *qp;
/** Control path fields */
uint64_t time_out;


@ -6,6 +6,7 @@ if not is_linux
endif
deps += ['bus_pci']
deps += ['bus_vdev']
deps += ['common_cpt']
deps += ['eventdev']
@ -18,3 +19,7 @@ sources = files(
)
includes += include_directories('../../common/cpt')
includes += include_directories('../../common/octeontx')
includes += include_directories('../../event/octeontx')
includes += include_directories('../../mempool/octeontx')
includes += include_directories('../../net/octeontx')


@ -6,6 +6,8 @@
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
@ -21,6 +23,8 @@
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"
#include "ssovf_worker.h"
static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];
/* Forward declarations */
@ -412,15 +416,17 @@ otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
rte_mempool_put(sess_mp, priv);
}
static __rte_always_inline int32_t __rte_hot
static __rte_always_inline void * __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
void *req, uint64_t cpt_inst_w7)
{
struct cpt_request_info *user_req = (struct cpt_request_info *)req;
if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
return -EAGAIN;
if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN)) {
rte_errno = EAGAIN;
return NULL;
}
fill_cpt_inst(instance, req, cpt_inst_w7);
@ -434,18 +440,12 @@ otx_cpt_request_enqueue(struct cpt_instance *instance,
/* Default mode of software queue */
mark_cpt_inst(instance);
pqueue->req_queue[pqueue->enq_tail] = (uintptr_t)user_req;
/* We will use soft queue length here to limit requests */
MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
pqueue->pending_count += 1;
CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
"op: %p", user_req, user_req->op);
return 0;
return req;
}
static __rte_always_inline int __rte_hot
static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
@ -456,11 +456,13 @@ otx_cpt_enq_single_asym(struct cpt_instance *instance,
struct cpt_asym_sess_misc *sess;
uintptr_t *cop;
void *mdata;
void *req;
int ret;
if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
return -ENOMEM;
rte_errno = ENOMEM;
return NULL;
}
sess = get_asym_session_private_data(asym_op->session,
@ -506,27 +508,26 @@ otx_cpt_enq_single_asym(struct cpt_instance *instance,
default:
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
ret = -EINVAL;
rte_errno = EINVAL;
goto req_fail;
}
ret = otx_cpt_request_enqueue(instance, pqueue, params.req,
req = otx_cpt_request_enqueue(instance, pqueue, params.req,
sess->cpt_inst_w7);
if (unlikely(ret)) {
if (unlikely(req == NULL)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
goto req_fail;
}
return 0;
return req;
req_fail:
free_op_meta(mdata, minfo->pool);
return ret;
return NULL;
}
static __rte_always_inline int __rte_hot
static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
@ -536,6 +537,7 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
struct cpt_request_info *prep_req;
void *mdata = NULL;
int ret = 0;
void *req;
uint64_t cpt_op;
sess = (struct cpt_sess_misc *)
@ -554,23 +556,20 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
if (unlikely(ret)) {
CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
"ret 0x%x", op, (unsigned int)cpt_op, ret);
return ret;
return NULL;
}
/* Enqueue prepared instruction to h/w */
ret = otx_cpt_request_enqueue(instance, pqueue, prep_req,
req = otx_cpt_request_enqueue(instance, pqueue, prep_req,
sess->cpt_inst_w7);
if (unlikely(ret)) {
if (unlikely(req == NULL))
/* Buffer allocated for request preparation need to be freed */
free_op_meta(mdata, instance->meta_info.pool);
return ret;
}
return 0;
return req;
}
static __rte_always_inline int __rte_hot
static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pend_q)
@ -578,12 +577,15 @@ otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
const int driver_id = otx_cryptodev_driver_id;
struct rte_crypto_sym_op *sym_op = op->sym;
struct rte_cryptodev_sym_session *sess;
void *req;
int ret;
/* Create temporary session */
sess = rte_cryptodev_sym_session_create(instance->sess_mp);
if (sess == NULL)
return -ENOMEM;
if (sess == NULL) {
rte_errno = ENOMEM;
return NULL;
}
ret = sym_session_configure(driver_id, sym_op->xform, sess,
instance->sess_mp_priv);
@ -592,24 +594,24 @@ otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
sym_op->session = sess;
ret = otx_cpt_enq_single_sym(instance, op, pend_q);
req = otx_cpt_enq_single_sym(instance, op, pend_q);
if (unlikely(ret))
if (unlikely(req == NULL))
goto priv_put;
return 0;
return req;
priv_put:
sym_session_clear(driver_id, sess);
sess_put:
rte_mempool_put(instance->sess_mp, sess);
return ret;
return NULL;
}
#define OP_TYPE_SYM 0
#define OP_TYPE_ASYM 1
static __rte_always_inline int __rte_hot
static __rte_always_inline void *__rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
struct rte_crypto_op *op,
struct pending_queue *pqueue,
@ -631,7 +633,8 @@ otx_cpt_enq_single(struct cpt_instance *inst,
}
/* Should not reach here */
return -ENOTSUP;
rte_errno = ENOTSUP;
return NULL;
}
static __rte_always_inline uint16_t __rte_hot
@ -640,7 +643,7 @@ otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
{
struct cpt_instance *instance = (struct cpt_instance *)qptr;
uint16_t count;
int ret;
void *req;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
struct pending_queue *pqueue = &cptvf->pqueue;
@ -652,10 +655,14 @@ otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
while (likely(count < nb_ops)) {
/* Enqueue single op */
ret = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);
req = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);
if (unlikely(ret))
if (unlikely(req == NULL))
break;
pqueue->req_queue[pqueue->enq_tail] = (uintptr_t)req;
MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
pqueue->pending_count += 1;
count++;
}
otx_cpt_ring_dbell(instance, count);
@ -674,6 +681,80 @@ otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
static __rte_always_inline void
submit_request_to_sso(struct ssows *ws, uintptr_t req,
struct rte_event *rsp_info)
{
uint64_t add_work;
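/* Build the SSO add-work word: flow id, CRYPTODEV event type, response sched type */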
add_work = rsp_info->flow_id | (RTE_EVENT_TYPE_CRYPTODEV << 28) |
((uint64_t)(rsp_info->sched_type) << 32);
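/* Sched type 0 (ordered): wait until this workslot is at the head before adding work */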
if (!rsp_info->sched_type)
ssows_head_wait(ws);
rte_atomic_thread_fence(__ATOMIC_RELEASE);
ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}
static inline union rte_event_crypto_metadata *
get_event_crypto_mdata(struct rte_crypto_op *op)
{
union rte_event_crypto_metadata *ec_mdata;
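/* Metadata lives in session user data for session ops, or at private_data_offset for sessionless ops */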
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
ec_mdata = rte_cryptodev_sym_session_get_user_data(
op->sym->session);
else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
op->private_data_offset)
ec_mdata = (union rte_event_crypto_metadata *)
((uint8_t *)op + op->private_data_offset);
else
return NULL;
return ec_mdata;
}
uint16_t __rte_hot
otx_crypto_adapter_enqueue(void *port, struct rte_crypto_op *op)
{
union rte_event_crypto_metadata *ec_mdata;
struct cpt_instance *instance;
struct cpt_request_info *req;
struct rte_event *rsp_info;
uint8_t op_type, cdev_id;
uint16_t qp_id;
ec_mdata = get_event_crypto_mdata(op);
if (unlikely(ec_mdata == NULL)) {
rte_errno = EINVAL;
return 0;
}
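/* request_info selects the cryptodev and queue pair; response_info shapes the completion event */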
cdev_id = ec_mdata->request_info.cdev_id;
qp_id = ec_mdata->request_info.queue_pair_id;
rsp_info = &ec_mdata->response_info;
instance = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
if (unlikely(!instance->ca_enabled)) {
rte_errno = EINVAL;
return 0;
}
op_type = op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
OP_TYPE_ASYM;
req = otx_cpt_enq_single(instance, op,
&((struct cpt_vf *)instance)->pqueue, op_type);
if (unlikely(req == NULL))
return 0;
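/* Completions can come back out of order, so each op is submitted and pushed to SSO individually */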
otx_cpt_ring_dbell(instance, 1);
req->qp = instance;
submit_request_to_sso(port, (uintptr_t)req, rsp_info);
return 1;
}
static inline void
otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
struct rte_crypto_rsa_xform *rsa_ctx)
@ -820,6 +901,50 @@ otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
return;
}
static inline void
free_sym_session_data(const struct cpt_instance *instance,
struct rte_crypto_op *cop)
{
void *sess_private_data_t = get_sym_session_private_data(
cop->sym->session, otx_cryptodev_driver_id);
memset(sess_private_data_t, 0, cpt_get_session_size());
memset(cop->sym->session, 0,
rte_cryptodev_sym_get_existing_header_session_size(
cop->sym->session));
rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
rte_mempool_put(instance->sess_mp, cop->sym->session);
cop->sym->session = NULL;
}
static __rte_always_inline struct rte_crypto_op *
otx_cpt_process_response(const struct cpt_instance *instance, uintptr_t *rsp,
uint8_t cc, const uint8_t op_type)
{
struct rte_crypto_op *cop;
void *metabuf;
metabuf = (void *)rsp[0];
cop = (void *)rsp[1];
/* Check completion code */
if (likely(cc == 0)) {
/* H/w success pkt. Post process */
otx_cpt_dequeue_post_process(cop, rsp, op_type);
} else if (cc == ERR_GC_ICV_MISCOMPARE) {
/* auth data mismatch */
cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
/* Error */
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
free_sym_session_data(instance, cop);
free_op_meta(metabuf, instance->meta_info.pool);
return cop;
}
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const uint8_t op_type)
@ -832,9 +957,6 @@ otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
uint8_t ret;
int nb_completed;
struct pending_queue *pqueue = &cptvf->pqueue;
struct rte_crypto_op *cop;
void *metabuf;
uintptr_t *rsp;
pcount = pqueue->pending_count;
count = (nb_ops > pcount) ? pcount : nb_ops;
@ -869,45 +991,11 @@ otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
nb_completed = i;
for (i = 0; i < nb_completed; i++) {
rsp = (void *)ops[i];
if (likely((i + 1) < nb_completed))
rte_prefetch0(ops[i+1]);
metabuf = (void *)rsp[0];
cop = (void *)rsp[1];
ops[i] = cop;
/* Check completion code */
if (likely(cc[i] == 0)) {
/* H/w success pkt. Post process */
otx_cpt_dequeue_post_process(cop, rsp, op_type);
} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
/* auth data mismatch */
cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
/* Error */
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
void *sess_private_data_t =
get_sym_session_private_data(cop->sym->session,
otx_cryptodev_driver_id);
memset(sess_private_data_t, 0,
cpt_get_session_size());
memset(cop->sym->session, 0,
rte_cryptodev_sym_get_existing_header_session_size(
cop->sym->session));
rte_mempool_put(instance->sess_mp_priv,
sess_private_data_t);
rte_mempool_put(instance->sess_mp, cop->sym->session);
cop->sym->session = NULL;
}
free_op_meta(metabuf, instance->meta_info.pool);
ops[i] = otx_cpt_process_response(instance, (void *)ops[i],
cc[i], op_type);
}
return nb_completed;
@ -925,6 +1013,32 @@ otx_cpt_dequeue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
uintptr_t __rte_hot
otx_crypto_adapter_dequeue(uintptr_t get_work1)
{
const struct cpt_instance *instance;
struct cpt_request_info *req;
struct rte_crypto_op *cop;
uint8_t cc, op_type;
uintptr_t *rsp;
req = (struct cpt_request_info *)get_work1;
instance = req->qp;
rsp = req->op;
cop = (void *)rsp[1];
op_type = cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
OP_TYPE_ASYM;
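/* Poll until the hardware reports a final completion code for this request */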
do {
cc = check_nb_command_id(
req, (struct cpt_instance *)(uintptr_t)instance);
} while (cc == ERR_REQ_PENDING);
cop = otx_cpt_process_response(instance, (void *)req->op, cc, op_type);
return (uintptr_t)(cop);
}
static struct rte_cryptodev_ops cptvf_ops = {
/* Device related operations */
.dev_configure = otx_cpt_dev_config,


@ -14,4 +14,12 @@
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev);
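/* Fast path hooks called from the OCTEON TX event (SSO) PMD */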
__rte_internal
uint16_t __rte_hot
otx_crypto_adapter_enqueue(void *port, struct rte_crypto_op *op);
__rte_internal
uintptr_t __rte_hot
otx_crypto_adapter_dequeue(uintptr_t get_work1);
#endif /* _OTX_CRYPTODEV_OPS_H_ */


@ -1,3 +1,12 @@
DPDK_21 {
local: *;
};
INTERNAL {
global:
otx_crypto_adapter_enqueue;
otx_crypto_adapter_dequeue;
local: *;
};


@ -734,7 +734,8 @@ ssovf_crypto_adapter_caps_get(const struct rte_eventdev *dev,
RTE_SET_USED(dev);
RTE_SET_USED(cdev);
*caps = 0;
*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
return 0;
}


@ -322,6 +322,15 @@ sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[], \
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
static uint16_t __rte_hot
ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
uint16_t nb_events)
{
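/* The SSO PMD forwards a single op per call; nb_events is unused */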
RTE_SET_USED(nb_events);
return otx_crypto_adapter_enqueue(port, ev->event_ptr);
}
void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
@ -332,6 +341,8 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
dev->enqueue_new_burst = ssows_enq_new_burst;
dev->enqueue_forward_burst = ssows_enq_fwd_burst;
dev->ca_enqueue = ssow_crypto_adapter_enqueue;
const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
#define T(name, f3, f2, f1, f0, sz, flags) \
[f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,


@ -4,6 +4,9 @@
#include <arpa/inet.h>
#ifndef _SSOVF_WORKER_H_
#define _SSOVF_WORKER_H_
#include <rte_common.h>
#include <rte_branch_prediction.h>
@ -11,6 +14,7 @@
#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"
#include "otx_cryptodev_ops.h"
/* Alignment */
#define OCCTX_ALIGN 128
@ -174,14 +178,17 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
sched_type_queue = sched_type_queue << 38;
ev->event = sched_type_queue | (get_work0 & 0xffffffff);
if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
(ev->event >> 20) & 0x7F, flag, ws->lookup_mem);
if (get_work1) {
if (ev->event_type == RTE_EVENT_TYPE_ETHDEV)
get_work1 = (uintptr_t)ssovf_octeontx_wqe_to_pkt(
get_work1, (ev->event >> 20) & 0x7F, flag,
ws->lookup_mem);
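/* Crypto completions carry a cpt_request_info pointer; convert it to the finished crypto op */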
else if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV)
get_work1 = otx_crypto_adapter_dequeue(get_work1);
ev->u64 = get_work1;
} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
ssovf_octeontx_wqe_free(get_work1);
return 0;
} else {
ev->u64 = get_work1;
}
return !!get_work1;
@ -254,3 +261,11 @@ ssows_swtag_wait(struct ssows *ws)
while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
;
}
static __rte_always_inline void
ssows_head_wait(struct ssows *ws)
{
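/* Spin until the HEAD bit (bit 35) of the workslot TAG register is set */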
while (!(ssovf_read64(ws->base + SSOW_VHWS_TAG) & (1ULL << 35)))
;
}
#endif /* _SSOVF_WORKER_H_ */


@ -54,6 +54,7 @@ static inline uint64_t
otx2_handle_crypto_event(uint64_t get_work1)
{
struct cpt_request_info *req;
const struct otx2_cpt_qp *qp;
struct rte_crypto_op *cop;
uintptr_t *rsp;
void *metabuf;
@ -61,14 +62,15 @@ otx2_handle_crypto_event(uint64_t get_work1)
req = (struct cpt_request_info *)(get_work1);
cc = otx2_cpt_compcode_get(req);
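/* req->qp is now stored as an opaque pointer (see cpt_common.h); here it refers to the otx2 queue pair */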
qp = req->qp;
rsp = req->op;
metabuf = (void *)rsp[0];
cop = (void *)rsp[1];
otx2_ca_deq_post_process(req->qp, cop, rsp, cc);
otx2_ca_deq_post_process(qp, cop, rsp, cc);
rte_mempool_put(req->qp->meta_info.pool, metabuf);
rte_mempool_put(qp->meta_info.pool, metabuf);
return (uint64_t)(cop);
}