/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

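/* Record the SEC hardware completion status on the crypto op: a zero
 * frame-descriptor status means success; anything else is logged and
 * flagged as an error on the op.
 */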
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}
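
/* Allocate a per-op context from the current lcore's context pool and
 * zero the first sg_count scatter-gather entries of the embedded job.
 */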
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. Each SG entry is 16 bytes and one call to
	 * dcbz_64() clears 64 bytes, i.e. four entries at a time. Since
	 * dpaa_sec_alloc_ctx() is called for each packet, dcbz_64() is
	 * used here because memset() is costlier.
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
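
/* ERN (enqueue rejection) callback for SEC frame queues: nothing can be
 * recovered at this point, so the failure is only logged.
 */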
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that
 * all packets in this queue can be dispatched to the CAAM engine.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Jobs are enqueued on in_fq; the CAAM engine puts the crypto result
 * on out_fq, where this callback picks it up.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}
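
/* Session-type helpers: an AEAD session has only aead_alg set, while
 * cipher/auth sessions leave it zero; dir distinguishes encrypt
 * (encode) from decrypt (decode).
 */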
static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
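/* prepare pdcp protocol command block of the session */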
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (rta_inline_pdcp_query(authdata.algtype,
				cipherdata.algtype,
				ses->pdcp.sn_size,
				ses->pdcp.hfn_ovd)) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)
					(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		}
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

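	/* rta_inline_query() reads the two key lengths staged in
	 * sh_desc[0]/sh_desc[1] and reports, via bits 0 and 1 of
	 * sh_desc[2], which keys are small enough to be inlined in the
	 * shared descriptor; the others are referenced by pointer.
	 */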
	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

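	/* Store the shared-descriptor length in the CDB header and convert
	 * both header words to the big-endian layout the SEC hardware
	 * expects.
	 */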
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide exactly the
	 * requested number of buffers (QM_VDQCR_EXACT). Otherwise the flag
	 * is not set, and the portal may return up to two more buffers
	 * than requested, so we ask for two less in that case.
	 */
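	/* For example (an illustrative figure, assuming nb_ops does not
	 * exceed DPAA_MAX_DEQUEUE_NUM_FRAMES): nb_ops = 16 requests 14
	 * frames without QM_VDQCR_EXACT, so the portal may hand back 14,
	 * 15 or 16 frames, never more than nb_ops.
	 */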
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx:
		 * sg[0] is for output,
		 * sg[1] is for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
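
/* Build a scatter-gather job descriptor for an auth-only op on a
 * (possibly multi-segment) mbuf. In the compound frame, sg[0] is the
 * output (digest) and sg[1] is the input, extended from sg[2] onwards.
 */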
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
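
/* Build a scatter-gather job descriptor for a cipher-only op; output
 * and input chains are laid out from sg[2] onwards, with the IV as the
 * first input entry.
 */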
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
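
/* Build a job descriptor for a cipher-only op on contiguous buffers;
 * the compound input frame is the IV followed by the cipher data range.
 */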
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
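
/* Build a scatter-gather job descriptor for an AEAD (GCM) op; the
 * input chain is IV, optional AAD, then the payload segments, with the
 * digest appended on decrypt for in-hardware verification.
 */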
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
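
/* Contiguous-buffer variant of the AEAD (GCM) job builder. */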
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
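
/* Build a scatter-gather job descriptor for a chained cipher+auth op;
 * the digest is produced on encrypt or verified in hardware on decrypt.
 */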
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
|
|
|
|
|
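/*
 * Layout note (inferred from the builders in this file): each job is a
 * SEC compound frame of qm_sg_entry items, where cf->sg[0] describes the
 * output and cf->sg[1] the input; a scatter-gather side sets the
 * extension bit and points at a table that starts at cf->sg[2].
 */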
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
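/*
 * Sizing note: the contiguous-mbuf path above asks dpaa_sec_alloc_ctx()
 * for a fixed 7 SG entries: the two compound-frame entries plus at most
 * IV + data + digest on the input side and data + digest on the output
 * side. The scatter-gather variants instead derive req_segs from
 * mbuf->nb_segs.
 */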
#ifdef RTE_LIB_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
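/*
 * Protocol (IPsec/PDCP) offload hands SEC the whole packet: the input
 * entry covers pkt_len, while the output entry is sized to the full
 * remaining buffer (buf_len - data_off) so the transform has room for
 * any bytes it adds. The L4 ptype is cleared because the payload is
 * rewritten. Descriptive note inferred from the code above.
 */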
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif
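/*
 * Same idea in the SG variant: the last output segment gets
 * (buf_len - data_off) bytes rather than data_len, leaving space for
 * protocol-added bytes, while out_sg->length reports the accumulated
 * total to the hardware.
 */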
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIB_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIB_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
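/*
 * Usage sketch (illustrative, not part of the driver): applications reach
 * dpaa_sec_enqueue_burst()/dpaa_sec_dequeue_burst() through the generic
 * cryptodev burst API. Assuming a configured dev_id with queue pair 0 and
 * populated ops[]/deq_ops[] arrays:
 *
 *	uint16_t n_enq, n_deq = 0;
 *
 *	n_enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	while (n_deq < n_enq)
 *		n_deq += rte_cryptodev_dequeue_burst(dev_id, 0,
 *				&deq_ops[n_deq], n_enq - n_deq);
 *
 * A real application should bound this polling loop and check each
 * op->status against RTE_CRYPTO_OP_STATUS_SUCCESS.
 */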
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
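/*
 * Dequeue-side counterpart of the burst enqueue above: dpaa_sec_deq()
 * (defined earlier in this file) drains completed jobs for this queue
 * pair; rx_errs accumulates the gap between ops requested and ops
 * actually returned.
 */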
|
|
|
|
/** Release queue pair */
|
|
|
|
static int
|
|
|
|
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
|
|
|
|
uint16_t qp_id)
|
|
|
|
{
|
|
|
|
struct dpaa_sec_dev_private *internals;
|
|
|
|
struct dpaa_sec_qp *qp = NULL;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2018-04-19 16:52:36 +00:00
|
|
|
DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
|
2017-10-09 14:21:40 +00:00
|
|
|
|
|
|
|
internals = dev->data->dev_private;
|
|
|
|
if (qp_id >= internals->max_nb_queue_pairs) {
|
2018-04-19 16:52:36 +00:00
|
|
|
DPAA_SEC_ERR("Max supported qpid %d",
|
2017-10-09 14:21:40 +00:00
|
|
|
internals->max_nb_queue_pairs);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
qp = &internals->qps[qp_id];
|
2019-09-30 14:41:03 +00:00
|
|
|
rte_mempool_free(qp->ctx_pool);
|
2017-10-09 14:21:40 +00:00
|
|
|
qp->internals = NULL;
|
|
|
|
dev->data->queue_pairs[qp_id] = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Setup a queue pair */
|
|
|
|
static int
|
|
|
|
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
|
|
|
|
__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
|
2019-01-10 14:50:11 +00:00
|
|
|
__rte_unused int socket_id)
|
2017-10-09 14:21:40 +00:00
|
|
|
{
|
|
|
|
struct dpaa_sec_dev_private *internals;
|
|
|
|
struct dpaa_sec_qp *qp = NULL;
|
2019-09-30 14:41:03 +00:00
|
|
|
char str[20];
|
2017-10-09 14:21:40 +00:00
|
|
|
|
2018-04-19 16:52:36 +00:00
|
|
|
DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
|
2017-10-09 14:21:40 +00:00
|
|
|
|
|
|
|
internals = dev->data->dev_private;
|
|
|
|
if (qp_id >= internals->max_nb_queue_pairs) {
|
2018-04-19 16:52:36 +00:00
|
|
|
DPAA_SEC_ERR("Max supported qpid %d",
|
2017-10-09 14:21:40 +00:00
|
|
|
internals->max_nb_queue_pairs);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
qp = &internals->qps[qp_id];
|
|
|
|
qp->internals = internals;
|
2019-09-30 14:41:03 +00:00
|
|
|
snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
|
|
|
|
dev->data->dev_id, qp_id);
|
|
|
|
if (!qp->ctx_pool) {
|
|
|
|
qp->ctx_pool = rte_mempool_create((const char *)str,
|
|
|
|
CTX_POOL_NUM_BUFS,
|
|
|
|
CTX_POOL_BUF_SIZE,
|
|
|
|
CTX_POOL_CACHE_SIZE, 0,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
SOCKET_ID_ANY, 0);
|
|
|
|
if (!qp->ctx_pool) {
|
|
|
|
DPAA_SEC_ERR("%s create failed\n", str);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
|
|
|
|
dev->data->dev_id, qp_id);
|
2017-10-09 14:21:40 +00:00
|
|
|
dev->data->queue_pairs[qp_id] = qp;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
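/*
 * Each queue pair owns a private mempool of dpaa_sec_op_ctx objects
 * (ctx_pool), which dpaa_sec_alloc_ctx() draws from on the hot path so
 * no allocation happens per packet. On reconfiguration an existing pool
 * is reused, as logged above.
 */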
/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
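/*
 * These session *_init() helpers (dpaa_sec_cipher_init() above and the
 * auth/chain/aead variants below) only validate parameters, copy keys
 * and pick the RTA algorithm constants (OP_ALG_ALGSEL_xxx,
 * OP_ALG_AAI_xxx); the shared descriptor itself is built later by
 * dpaa_sec_prep_cdb() when the session is attached to a queue in
 * dpaa_sec_attach_sess_q().
 */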
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	session->ctxt = DPAA_SEC_CIPHER_HASH;
	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		return -ENOTSUP;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		return -ENOTSUP;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
	return 0;
}
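/*
 * dpaa_sec_chain_init() relies on session->auth_cipher_text to know
 * which xform in the chain carries the cipher parameters; the flag is
 * set in dpaa_sec_set_session_parameters() below from the xform order
 * and operation direction.
 */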
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (&qi->inq[i] == fq) {
			if (qman_retire_fq(fq, NULL) != 0)
				DPAA_SEC_DEBUG("Queue is not retired\n");
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       rte_dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
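/*
 * Sessions are attached lazily, per lcore: the first enqueue from a core
 * binds the session to that core's queue pair, prepares the shared
 * descriptor and points the session's Rx FQ at the queue pair's outq.
 * This is why ses->qp[] and ses->inq[] are indexed by
 * rte_lcore_id() % MAX_DPAA_CORES throughout.
 */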
static inline void
free_session_data(dpaa_sec_session *s)
{
	if (is_aead(s))
		rte_free(s->aead_key.data);
	else {
		rte_free(s->auth_key.data);
		rte_free(s->cipher_key.data);
	}
	memset(s, 0, sizeof(dpaa_sec_session));
}
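/*
 * dpaa_sec_session keeps its key buffers in an anonymous union (aead_key
 * overlapping the cipher_key/auth_key pair), so the free path must check
 * is_aead() and release only the member that was actually populated;
 * the trailing memset also keeps higher layers from freeing the same
 * pointers again.
 */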
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	free_session_data(session);
	return ret;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
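/*
 * Usage sketch (illustrative, not part of the driver; API names match
 * the cryptodev generation this file builds against): the application
 * creates the generic session object, then the per-device init lands in
 * dpaa_sec_sym_session_configure() above:
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					   sess_priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session init failed");
 *
 * sess_mp, sess_priv_mp, dev_id and xform are application-provided.
 */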
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	free_session_data(s);
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			 struct rte_security_ipsec_xform *ipsec_xform,
			 dpaa_sec_session *session)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		switch (session->digest_length) {
		case 8:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
			break;
		case 12:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
			break;
		case 16:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
				     session->digest_length);
			return -EINVAL;
		}
		if (session->dir == DIR_ENC) {
			memcpy(session->encap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		} else {
			memcpy(session->decap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		}
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
			      aead_xform->algo);
		return -ENOTSUP;
	}
	return 0;
}
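/*
 * For IPsec ESP with AES-GCM the 4-byte salt from the security xform is
 * copied into the encap/decap PDB, and the OP_PCL_IPSEC_AES_GCM{8,12,16}
 * protocol constant is chosen by ICV length, since RFC 4106 allows 8-,
 * 12- and 16-byte ICVs.
 */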
static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
	struct rte_crypto_auth_xform *auth_xform,
	struct rte_security_ipsec_xform *ipsec_xform,
	dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						       cipher_xform->key.length,
						       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
			"+++Using sha256-hmac truncated len is non-standard,"
			" it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
static int
|
|
|
|
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
|
|
|
|
struct rte_security_session_conf *conf,
|
|
|
|
void *sess)
|
|
|
|
{
|
|
|
|
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
|
|
|
|
struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
|
|
|
|
struct rte_crypto_auth_xform *auth_xform = NULL;
|
|
|
|
struct rte_crypto_cipher_xform *cipher_xform = NULL;
|
|
|
|
struct rte_crypto_aead_xform *aead_xform = NULL;
|
|
|
|
dpaa_sec_session *session = (dpaa_sec_session *)sess;
|
|
|
|
uint32_t i;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
memset(session, 0, sizeof(dpaa_sec_session));
|
|
|
|
session->proto_alg = conf->protocol;
|
|
|
|
session->ctxt = DPAA_SEC_IPSEC;
|
|
|
|
|
|
|
|
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
|
|
|
|
session->dir = DIR_ENC;
|
|
|
|
else
|
|
|
|
session->dir = DIR_DEC;
|
|
|
|
|
|
|
|
if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
|
|
|
|
cipher_xform = &conf->crypto_xform->cipher;
|
|
|
|
if (conf->crypto_xform->next)
|
|
|
|
auth_xform = &conf->crypto_xform->next->auth;
|
|
|
|
ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
|
2019-11-06 05:17:31 +00:00
|
|
|
ipsec_xform, session);
|
2019-11-06 05:17:25 +00:00
|
|
|
} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
|
|
|
|
auth_xform = &conf->crypto_xform->auth;
|
|
|
|
if (conf->crypto_xform->next)
|
|
|
|
cipher_xform = &conf->crypto_xform->next->cipher;
|
|
|
|
ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
|
2019-11-06 05:17:31 +00:00
|
|
|
ipsec_xform, session);
|
2019-11-06 05:17:25 +00:00
|
|
|
} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
|
|
|
|
aead_xform = &conf->crypto_xform->aead;
|
|
|
|
ret = dpaa_sec_ipsec_aead_init(aead_xform,
|
|
|
|
ipsec_xform, session);
|
|
|
|
} else {
|
|
|
|
DPAA_SEC_ERR("XFORM not specified");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (ret) {
|
|
|
|
DPAA_SEC_ERR("Failed to process xform");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-01-15 06:35:53 +00:00
|
|
|
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
			       &ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
			       &ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
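		/*
		 * SEC implements anti-replay windows of 32, 64 or 128
		 * entries: round the requested size up to a power of two
		 * and select the smallest window that covers it.
		 */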
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;
			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	} else
		goto out;
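	/*
	 * Reserve one SEC input queue per lcore for this session; the
	 * device spinlock serialises allocation from the shared rx-queue
	 * pool.
	 */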
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	/*
	 * free_session_data() releases whichever member of the session key
	 * union is live (aead_key vs. the cipher_key/auth_key pair) and
	 * clears the pointers, so the keys are freed exactly once.
	 */
	free_session_data(session);
	return -1;
}

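/*
 * Populate a dpaa_sec_session for PDCP lookaside offload. A minimal
 * configuration sketch from the application side (field values are
 * illustrative only):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.bearer = 0x1,
 *			.pkt_dir = 0,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0,
 *			.hfn_threshold = 0xfffff,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */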
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

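	/*
	 * Map the cipher algorithm to the SEC PDCP cipher type and make a
	 * private copy of the key; no cipher transform means NULL ciphering.
	 */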
	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				     cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
		       cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

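	/* Control-plane PDCP defines only 5-bit and 12-bit sequence numbers. */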
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			ret = -EINVAL;
			goto out;
		}
	}

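	/*
	 * Map the auth algorithm to the SEC PDCP auth type and make a
	 * private copy of the key; no auth transform means no integrity
	 * algorithm is configured.
	 */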
	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}

	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
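	/*
	 * With per-packet HFN override enabled, the overriding HFN is read
	 * from the crypto op at the cipher transform's IV offset.
	 */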
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return ret;
}

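/*
 * rte_security session create entry point: take a session object from
 * the mempool and configure it for the requested protocol. A minimal
 * IPsec usage sketch from the application side (values are illustrative
 * and the create call's mempool arguments depend on the DPDK release):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1000,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *		},
 *		.crypto_xform = &aead_xform,
 *	};
 */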
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();

	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENODEV;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}

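/*
 * DQRR callback used when a queue pair is bound to an eventdev queue
 * with parallel scheduling: convert the returned SEC frame descriptor
 * back into the crypto op, fill the event, and consume the DQRR entry.
 */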
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}

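/*
 * Atomic-scheduling variant: same op/event translation as the parallel
 * handler, but the DQRR entry is held (qman_cb_dqrr_defer) and tracked
 * per lcore so atomicity is kept until the application releases it.
 */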
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}

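/*
 * Bind a queue pair's output FQ to an eventdev channel: choose the
 * DQRR callback matching the requested schedule type and re-init the
 * FQ so completions are delivered through the scheduler.
 */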
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};

	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}

int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

#ifdef RTE_LIB_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}

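/*
 * Per-device initialisation: publish ops and feature flags, create the
 * rte_security context (primary process only), and initialise the tx
 * and rx frame queues backing the queue pairs and sessions.
 */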
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIB_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}

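/*
 * Bus probe: allocate the cryptodev, resolve the SEC era from the
 * device tree when it is not already configured, ensure the calling
 * thread owns a QMAN portal, then run device initialisation.
 */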
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);