crypto/octeontx: move device specific code to driver

Move the upper-level enqueue/dequeue routines to the driver. The h/w
interface used to submit requests has enough differences to justify
separate per-driver routines.

Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Anoob Joseph <anoobj@marvell.com> authored 2019-03-01 18:42:32 +00:00; committed by Akhil Goyal
parent f39928e656
commit f194f19821
7 changed files with 255 additions and 217 deletions
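
The routines moved by this commit implement the PMD's burst hooks behind the
generic cryptodev API. A minimal datapath sketch, assuming a configured
OCTEON TX cryptodev on dev_id, queue pair 0, and ops[] prepared elsewhere
(illustrative only, not part of the patch):

#include <rte_cryptodev.h>

#define BURST_SZ 32

/* Enqueue a burst and poll for its completions; the PMD may accept
 * fewer than BURST_SZ ops when its soft queue (DEFAULT_CMD_QLEN) fills.
 */
static void
run_burst(uint8_t dev_id, struct rte_crypto_op **ops)
{
	uint16_t nb_enq, nb_deq = 0;

	nb_enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, BURST_SZ);

	while (nb_deq < nb_enq)
		nb_deq += rte_cryptodev_dequeue_burst(dev_id, 0,
						      &ops[nb_deq],
						      nb_enq - nb_deq);
}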

drivers/common/cpt/cpt_common.h

@@ -9,12 +9,6 @@
  * This file defines common macros and structs
  */
 
-/*
- * Macros to determine CPT model. Driver makefile will define CPT_MODEL
- * accordingly
- */
-#define CRYPTO_OCTEONTX	0x1
-
 #define TIME_IN_RESET_COUNT	5
 
 /* Default command timeout in seconds */

drivers/common/cpt/cpt_mcode_defines.h

@@ -383,4 +383,16 @@ typedef mc_hash_type_t auth_type_t;
 #define SESS_PRIV(__sess) \
 	(void *)((uint8_t *)__sess + sizeof(struct cpt_sess_misc))
 
+/*
+ * Get the session size
+ *
+ * @return
+ *   - session size
+ */
+static __rte_always_inline unsigned int
+cpt_get_session_size(void)
+{
+	unsigned int ctx_len = sizeof(struct cpt_ctx);
+	return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
+}
 #endif /* _CPT_MCODE_DEFINES_H_ */
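
The new helper sizes a session as the misc header plus the microcode context
rounded up to an 8-byte boundary. A standalone illustration of the rounding
behaviour, using a hypothetical 100-byte context (not part of the patch):

#include <assert.h>
#include <rte_common.h>

int
main(void)
{
	/* RTE_ALIGN_CEIL rounds up to the given power-of-two boundary */
	assert(RTE_ALIGN_CEIL(100, 8) == 104);	/* hypothetical ctx size */
	assert(RTE_ALIGN_CEIL(104, 8) == 104);	/* already aligned */
	return 0;
}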

drivers/common/cpt/cpt_request_mgr.h (deleted)

@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Cavium, Inc
- */
-
-#ifndef _CPT_REQUEST_MGR_H_
-#define _CPT_REQUEST_MGR_H_
-
-#include <rte_branch_prediction.h>
-#include <rte_cycles.h>
-
-#include "cpt_common.h"
-#include "cpt_mcode_defines.h"
-
-#if CPT_MODEL == CRYPTO_OCTEONTX
-#include "../../crypto/octeontx/otx_cryptodev_hw_access.h"
-#endif
-
-/*
- * This file defines the agreement between the common layer and the individual
- * crypto drivers for OCTEON TX series. Datapath in otx* directory include this
- * file and all these functions are static inlined for better performance.
- *
- */
-
-/*
- * Get the session size
- *
- * This function is used in the data path.
- *
- * @return
- *   - session size
- */
-static __rte_always_inline unsigned int
-cpt_get_session_size(void)
-{
-	unsigned int ctx_len = sizeof(struct cpt_ctx);
-	return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
-}
-
-static __rte_always_inline int32_t __hot
-cpt_enqueue_req(struct cpt_instance *instance, struct pending_queue *pqueue,
-		void *req)
-{
-	struct cpt_request_info *user_req = (struct cpt_request_info *)req;
-	int32_t ret = 0;
-
-	if (unlikely(!req))
-		return 0;
-
-	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
-		return -EAGAIN;
-
-	fill_cpt_inst(instance, req);
-
-	CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
-
-	/* Fill time_out cycles */
-	user_req->time_out = rte_get_timer_cycles() +
-			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
-	user_req->extra_time = 0;
-
-	/* Default mode of software queue */
-	mark_cpt_inst(instance);
-
-	pqueue->rid_queue[pqueue->enq_tail].rid =
-		(uintptr_t)user_req;
-
-	/* We will use soft queue length here to limit
-	 * requests
-	 */
-	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
-	pqueue->pending_count += 1;
-
-	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
-			 "op: %p", user_req, user_req->op);
-
-	return ret;
-}
-
-static __rte_always_inline int __hot
-cpt_pmd_crypto_operation(struct cpt_instance *instance,
-		struct rte_crypto_op *op, struct pending_queue *pqueue,
-		uint8_t cpt_driver_id)
-{
-	struct cpt_sess_misc *sess = NULL;
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	void *prep_req = NULL, *mdata = NULL;
-	int ret = 0;
-	uint64_t cpt_op;
-	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
-
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		int sess_len;
-
-		sess_len = cpt_get_session_size();
-
-		sess = rte_calloc(__func__, 1, sess_len, 8);
-		if (!sess)
-			return -ENOMEM;
-
-		sess->ctx_dma_addr = rte_malloc_virt2iova(sess) +
-			sizeof(struct cpt_sess_misc);
-
-		ret = instance_session_cfg(sym_op->xform, (void *)sess);
-		if (unlikely(ret))
-			return -EINVAL;
-	} else {
-		sess = (struct cpt_sess_misc *)
-			get_sym_session_private_data(sym_op->session,
-						     cpt_driver_id);
-	}
-
-	cpt_op = sess->cpt_op;
-
-	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
-		ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
-				     &prep_req);
-	else
-		ret = fill_digest_params(op, sess, &cptvf->meta_info,
-					 &mdata, &prep_req);
-
-	if (unlikely(ret)) {
-		CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
-			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
-		return ret;
-	}
-
-	/* Enqueue prepared instruction to HW */
-	ret = cpt_enqueue_req(instance, pqueue, prep_req);
-
-	if (unlikely(ret)) {
-		if (unlikely(ret == -EAGAIN))
-			goto req_fail;
-		CPT_LOG_DP_ERR("Error enqueing crypto request : error "
-			       "code %d", ret);
-		goto req_fail;
-	}
-
-	return 0;
-
-req_fail:
-	if (mdata)
-		free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
-	return ret;
-}
-
-static __rte_always_inline int32_t __hot
-cpt_dequeue_burst(struct cpt_instance *instance, uint16_t cnt,
-		  void *resp[], uint8_t cc[], struct pending_queue *pqueue)
-{
-	struct cpt_request_info *user_req;
-	struct rid *rid_e;
-	int i, count, pcount;
-	uint8_t ret;
-
-	pcount = pqueue->pending_count;
-	count = (cnt > pcount) ? pcount : cnt;
-
-	for (i = 0; i < count; i++) {
-		rid_e = &pqueue->rid_queue[pqueue->deq_head];
-		user_req = (struct cpt_request_info *)(rid_e->rid);
-
-		if (likely((i+1) < count))
-			rte_prefetch_non_temporal((void *)rid_e[1].rid);
-
-		ret = check_nb_command_id(user_req, instance);
-
-		if (unlikely(ret == ERR_REQ_PENDING)) {
-			/* Stop checking for completions */
-			break;
-		}
-
-		/* Return completion code and op handle */
-		cc[i] = (uint8_t)ret;
-		resp[i] = user_req->op;
-
-		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
-				 user_req, user_req->op, ret);
-
-		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
-		pqueue->pending_count -= 1;
-	}
-
-	return i;
-}
-
-#endif /* _CPT_REQUEST_MGR_H_ */

drivers/crypto/octeontx/Makefile

@@ -20,8 +20,9 @@ LDLIBS += -lrte_common_cpt
 VPATH += $(RTE_SDK)/drivers/crypto/octeontx
 
-CFLAGS += -O3 -DCPT_MODEL=CRYPTO_OCTEONTX
+CFLAGS += -O3
 CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
+CFLAGS += -DALLOW_EXPERIMENTAL_API
 
 # PMD code
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev.c

drivers/crypto/octeontx/meson.build

@@ -8,6 +8,7 @@ deps += ['bus_pci']
 deps += ['common_cpt']
 name = 'octeontx_crypto'
 
+allow_experimental_apis = true
 sources = files('otx_cryptodev.c',
 		'otx_cryptodev_capabilities.c',
 		'otx_cryptodev_hw_access.c',
@@ -15,4 +16,3 @@ sources = files('otx_cryptodev.c',
 		'otx_cryptodev_ops.c')
 
 includes += include_directories('../../common/cpt')
-cflags += '-DCPT_MODEL=CRYPTO_OCTEONTX'

drivers/crypto/octeontx/otx_cryptodev_hw_access.h

@@ -39,6 +39,8 @@
 struct cpt_instance {
 	uint32_t queue_id;
 	uintptr_t rsvd;
+	struct rte_mempool *sess_mp;
+	struct rte_mempool *sess_mp_priv;
 };
 
 struct command_chunk {
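
The two mempool pointers cached here come from the queue-pair configuration,
so the datapath can service sessionless ops without reaching into device
config. A minimal setup sketch, assuming both pools were created beforehand
(sess_mp and sess_priv_mp are hypothetical names):

#include <rte_cryptodev.h>
#include <rte_lcore.h>

static int
setup_qp0(uint8_t dev_id, struct rte_mempool *sess_mp,
	  struct rte_mempool *sess_priv_mp)
{
	/* The PMD copies both pool pointers out of qp_conf at setup */
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = sess_mp,
		.mp_session_private = sess_priv_mp,
	};

	return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					      rte_socket_id());
}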

drivers/crypto/octeontx/otx_cryptodev_ops.c

@@ -11,7 +11,6 @@
 #include "cpt_pmd_logs.h"
 #include "cpt_pmd_ops_helper.h"
 #include "cpt_ucode.h"
-#include "cpt_request_mgr.h"
 #include "otx_cryptodev.h"
 #include "otx_cryptodev_capabilities.h"
 
@@ -222,6 +221,8 @@ otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
 	}
 
 	instance->queue_id = que_pair_id;
+	instance->sess_mp = qp_conf->mp_session;
+	instance->sess_mp_priv = qp_conf->mp_session_private;
 	dev->data->queue_pairs[que_pair_id] = instance;
 
 	return 0;
@@ -340,11 +341,160 @@ otx_cpt_session_clear(struct rte_cryptodev *dev,
 	}
 }
 
+static __rte_always_inline int32_t __hot
+otx_cpt_request_enqueue(struct cpt_instance *instance,
+			struct pending_queue *pqueue,
+			void *req)
+{
+	struct cpt_request_info *user_req = (struct cpt_request_info *)req;
+
+	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
+		return -EAGAIN;
+
+	fill_cpt_inst(instance, req);
+
+	CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
+
+	/* Fill time_out cycles */
+	user_req->time_out = rte_get_timer_cycles() +
+			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+	user_req->extra_time = 0;
+
+	/* Default mode of software queue */
+	mark_cpt_inst(instance);
+
+	pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;
+
+	/* We will use soft queue length here to limit requests */
+	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
+	pqueue->pending_count += 1;
+
+	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
+			 "op: %p", user_req, user_req->op);
+
+	return 0;
+}
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single_sym(struct cpt_instance *instance,
+		       struct rte_crypto_op *op,
+		       struct pending_queue *pqueue)
+{
+	struct cpt_sess_misc *sess;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	void *prep_req, *mdata = NULL;
+	int ret = 0;
+	uint64_t cpt_op;
+	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+
+	sess = (struct cpt_sess_misc *)
+			get_sym_session_private_data(sym_op->session,
+						     otx_cryptodev_driver_id);
+
+	cpt_op = sess->cpt_op;
+
+	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
+		ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
+				     &prep_req);
+	else
+		ret = fill_digest_params(op, sess, &cptvf->meta_info,
+					 &mdata, &prep_req);
+
+	if (unlikely(ret)) {
+		CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
+			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
+		return ret;
+	}
+
+	/* Enqueue prepared instruction to h/w */
+	ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
+
+	if (unlikely(ret)) {
+		/* Buffer allocated for request preparation need to be freed */
+		free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
+		return ret;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
+				struct rte_crypto_op *op,
+				struct pending_queue *pqueue)
+{
+	struct cpt_sess_misc *sess;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	int ret;
+	void *sess_t = NULL;
+	void *sess_private_data_t = NULL;
+
+	/* Create tmp session */
+
+	if (rte_mempool_get(instance->sess_mp, (void **)&sess_t)) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (rte_mempool_get(instance->sess_mp_priv,
+			(void **)&sess_private_data_t)) {
+		ret = -ENOMEM;
+		goto free_sess;
+	}
+
+	sess = (struct cpt_sess_misc *)sess_private_data_t;
+
+	sess->ctx_dma_addr = rte_mempool_virt2iova(sess) +
+			sizeof(struct cpt_sess_misc);
+
+	ret = instance_session_cfg(sym_op->xform, (void *)sess);
+	if (unlikely(ret)) {
+		ret = -EINVAL;
+		goto free_sess_priv;
+	}
+
+	/* Save tmp session in op */
+
+	sym_op->session = (struct rte_cryptodev_sym_session *)sess_t;
+	set_sym_session_private_data(sym_op->session, otx_cryptodev_driver_id,
+				     sess_private_data_t);
+
+	/* Enqueue op with the tmp session set */
+	ret = otx_cpt_enq_single_sym(instance, op, pqueue);
+
+	if (unlikely(ret))
+		goto free_sess_priv;
+
+	return 0;
+
+free_sess_priv:
+	rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
+free_sess:
+	rte_mempool_put(instance->sess_mp, sess_t);
+exit:
+	return ret;
+}
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single(struct cpt_instance *inst,
+		   struct rte_crypto_op *op,
+		   struct pending_queue *pqueue)
+{
+	/* Check for the type */
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		return otx_cpt_enq_single_sym(inst, op, pqueue);
+	else if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
+		return otx_cpt_enq_single_sym_sessless(inst, op, pqueue);
+
+	/* Should not reach here */
+	return -EINVAL;
+}
+
 static uint16_t
 otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct cpt_instance *instance = (struct cpt_instance *)qptr;
-	uint16_t count = 0;
+	uint16_t count;
 	int ret;
 	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
 	struct pending_queue *pqueue = &cptvf->pqueue;
@@ -355,8 +505,10 @@ otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	count = 0;
 	while (likely(count < nb_ops)) {
-		ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
-				otx_cryptodev_driver_id);
+
+		/* Enqueue single op */
+		ret = otx_cpt_enq_single(instance, ops[count], pqueue);
 		if (unlikely(ret))
 			break;
 		count++;
@@ -365,48 +517,110 @@ otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	return count;
 }
 
+static __rte_always_inline void
+otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
+{
+	/* H/w has returned success */
+	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Perform further post processing */
+
+	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		/* Check if auth verify need to be completed */
+		if (unlikely(rsp[2]))
+			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
+		return;
+	}
+}
+
 static uint16_t
 otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct cpt_instance *instance = (struct cpt_instance *)qptr;
+	struct cpt_request_info *user_req;
 	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+	struct rid *rid_e;
+	uint8_t cc[nb_ops];
+	int i, count, pcount;
+	uint8_t ret;
+	int nb_completed;
 	struct pending_queue *pqueue = &cptvf->pqueue;
-	uint16_t nb_completed, i = 0;
-	uint8_t compcode[nb_ops];
-
-	nb_completed = cpt_dequeue_burst(instance, nb_ops,
-					 (void **)ops, compcode, pqueue);
-	while (likely(i < nb_completed)) {
-		struct rte_crypto_op *cop;
-		void *metabuf;
-		uintptr_t *rsp;
-		uint8_t status;
+	struct rte_crypto_op *cop;
+	void *metabuf;
+	uintptr_t *rsp;
+
+	pcount = pqueue->pending_count;
+	count = (nb_ops > pcount) ? pcount : nb_ops;
+
+	for (i = 0; i < count; i++) {
+		rid_e = &pqueue->rid_queue[pqueue->deq_head];
+		user_req = (struct cpt_request_info *)(rid_e->rid);
+
+		if (likely((i+1) < count))
+			rte_prefetch_non_temporal((void *)rid_e[1].rid);
+
+		ret = check_nb_command_id(user_req, instance);
+
+		if (unlikely(ret == ERR_REQ_PENDING)) {
+			/* Stop checking for completions */
+			break;
+		}
+
+		/* Return completion code and op handle */
+		cc[i] = ret;
+		ops[i] = user_req->op;
+
+		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
+				 user_req, user_req->op, ret);
+
+		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
+		pqueue->pending_count -= 1;
+	}
+
+	nb_completed = i;
+
+	for (i = 0; i < nb_completed; i++) {
 		rsp = (void *)ops[i];
-		status = compcode[i];
+
 		if (likely((i + 1) < nb_completed))
 			rte_prefetch0(ops[i+1]);
+
 		metabuf = (void *)rsp[0];
 		cop = (void *)rsp[1];
 		ops[i] = cop;
 
-		if (likely(status == 0)) {
-			if (likely(!rsp[2]))
-				cop->status =
-					RTE_CRYPTO_OP_STATUS_SUCCESS;
-			else
-				compl_auth_verify(cop, (uint8_t *)rsp[2],
-						  rsp[3]);
-		} else if (status == ERR_GC_ICV_MISCOMPARE) {
+		/* Check completion code */
+
+		if (likely(cc[i] == 0)) {
+			/* H/w success pkt. Post process */
+			otx_cpt_dequeue_post_process(cop, rsp);
+		} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
 			/* auth data mismatch */
 			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 		} else {
+			/* Error */
 			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		}
-		free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
-		i++;
+
+		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+			void *sess_private_data_t =
+				get_sym_session_private_data(cop->sym->session,
+						otx_cryptodev_driver_id);
+			memset(sess_private_data_t, 0,
+					cpt_get_session_size());
+			memset(cop->sym->session, 0,
+			rte_cryptodev_sym_get_existing_header_session_size(
+					cop->sym->session));
+			rte_mempool_put(instance->sess_mp_priv,
+					sess_private_data_t);
+			rte_mempool_put(instance->sess_mp, cop->sym->session);
+			cop->sym->session = NULL;
+		}
+		free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
 	}
+
 	return nb_completed;
 }
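
With this change a sessionless op borrows a temporary session from the queue
pair's mempools on enqueue and returns it, zeroed, on dequeue; the
application only attaches an xform chain. A hedged sketch of preparing such
an op, where op_pool is a hypothetical rte_crypto_op pool with room for one
xform (key/IV setup omitted):

#include <rte_crypto.h>
#include <rte_crypto_sym.h>

static struct rte_crypto_op *
make_sessionless_op(struct rte_mempool *op_pool)
{
	struct rte_crypto_op *op;
	struct rte_crypto_sym_xform *xform;

	/* Ops default to RTE_CRYPTO_OP_SESSIONLESS until a session is
	 * attached; the xform chain tells the PMD what to configure.
	 */
	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return NULL;

	/* Allocates one xform in the op's private area, sets op->sym->xform */
	xform = rte_crypto_op_sym_xforms_alloc(op, 1);
	if (xform == NULL) {
		rte_crypto_op_free(op);
		return NULL;
	}

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;

	return op;
}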