crypto/octeontx: add asymmetric session operations

Add asymmetric session setup and free functions to the octeontx
crypto PMD. Sessions with RSA and modular exponentiation (modexp)
transforms are supported.

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Kanaka Durga Kotamarthy <kkotamarthy@marvell.com>
Signed-off-by: Sunila Sahu <ssahu@marvell.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Kanaka Durga Kotamarthy, 2019-10-11 18:31:33 +05:30, committed by Akhil Goyal
parent 13d711f353
commit 33bcaae5f8
7 changed files with 307 additions and 12 deletions
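
For reviewers, a minimal application-side sketch of the session lifecycle these
new driver ops serve. It is a sketch only, assuming the asymmetric session API
of this DPDK era (rte_cryptodev_asym_session_create/init/clear/free; later
releases changed this API) and a hypothetical device id `dev_id` and session
mempool `asym_sess_mp` created by the application.

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_crypto_asym.h>

/* Hypothetical application context: device id and session mempool. */
extern uint8_t dev_id;
extern struct rte_mempool *asym_sess_mp;

static int
setup_modex_session(struct rte_cryptodev_asym_session **out,
		    uint8_t *modulus, size_t mod_len,
		    uint8_t *exponent, size_t exp_len)
{
	struct rte_crypto_asym_xform xform = {
		.next = NULL,
		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
		.modex = {
			.modulus = { .data = modulus, .length = mod_len },
			.exponent = { .data = exponent, .length = exp_len },
		},
	};
	struct rte_cryptodev_asym_session *sess;

	sess = rte_cryptodev_asym_session_create(asym_sess_mp);
	if (sess == NULL)
		return -ENOMEM;

	/* Ends up in otx_cpt_asym_session_cfg() added by this patch. */
	if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
					    asym_sess_mp) < 0) {
		rte_cryptodev_asym_session_free(sess);
		return -EINVAL;
	}

	*out = sess;
	return 0;
}

/* Teardown path; ends up in otx_cpt_asym_session_clear(). */
static void
teardown_modex_session(struct rte_cryptodev_asym_session *sess)
{
	rte_cryptodev_asym_session_clear(dev_id, sess);
	rte_cryptodev_asym_session_free(sess);
}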

doc/guides/cryptodevs/features/octeontx.ini

@@ -5,11 +5,13 @@
;
[Features]
Symmetric crypto = Y
Asymmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
In Place SGL = Y
OOP SGL In LB Out = Y
OOP SGL In SGL Out = Y
RSA PRIV OP KEY QT = Y
;
; Supported crypto algorithms of 'octeontx' crypto driver.
@@ -64,4 +66,6 @@ AES GCM (256) = Y
;
; Supported Asymmetric algorithms of the 'octeontx' crypto driver.
;
[Asymmetric]
RSA = Y
Modular Exponentiation = Y
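
The new `RSA PRIV OP KEY QT` line maps to the RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT
feature flag that this patch sets for AE VFs (see otx_cpt_dev_create() below). A
small hedged sketch of how an application can test for it; `dev_id` is a
placeholder:

#include <rte_cryptodev.h>

/* Returns non-zero when the device accepts RSA private keys in
 * quintuple (p, q, dP, dQ, qInv) form, as advertised above.
 */
static int
rsa_qt_keys_supported(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags & RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT) != 0;
}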

doc/guides/cryptodevs/octeontx.rst

@@ -10,8 +10,8 @@ cryptographic operations to cryptographic accelerator units on
poll mode driver enqueues the crypto request to this accelerator and dequeues
the response once the operation is completed.
Supported Algorithms
--------------------
Supported Symmetric Crypto Algorithms
-------------------------------------
Cipher Algorithms
~~~~~~~~~~~~~~~~~
@@ -53,6 +53,12 @@ AEAD Algorithms
* ``RTE_CRYPTO_AEAD_AES_GCM``
Supported Asymmetric Crypto Algorithms
--------------------------------------
* ``RTE_CRYPTO_ASYM_XFORM_RSA``
* ``RTE_CRYPTO_ASYM_XFORM_MODEX``
Config flags
------------

drivers/common/cpt/cpt_mcode_defines.h

@@ -6,6 +6,7 @@
#define _CPT_MCODE_DEFINES_H_
#include <rte_byteorder.h>
#include <rte_crypto_asym.h>
#include <rte_memory.h>
/*
@@ -314,6 +315,14 @@ struct cpt_ctx {
uint8_t auth_key[64];
};
struct cpt_asym_sess_misc {
enum rte_crypto_asym_xform_type xfrm_type;
union {
struct rte_crypto_rsa_xform rsa_ctx;
struct rte_crypto_modex_xform mod_ctx;
};
};
/* Buffer pointer */
typedef struct buf_ptr {
void *vaddr;

drivers/common/cpt/cpt_ucode_asym.h (new file)

@@ -0,0 +1,171 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#ifndef _CPT_UCODE_ASYM_H_
#define _CPT_UCODE_ASYM_H_
#include <rte_common.h>
#include <rte_crypto_asym.h>
#include <rte_malloc.h>
#include "cpt_mcode_defines.h"
static __rte_always_inline void
cpt_modex_param_normalize(uint8_t **data, size_t *len)
{
size_t i;
/* Strip leading NUL bytes */
for (i = 0; i < *len; i++) {
if ((*data)[i] != 0)
break;
}
*data += i;
*len -= i;
}
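/* Illustrative only (not part of the patch): given buf = {0x00, 0x00,
 * 0x01, 0x02} and len = 4, cpt_modex_param_normalize(&p, &len) leaves
 * p pointing at buf + 2 and len == 2; an all-zero input ends with
 * len == 0, which cpt_fill_modex_params() below rejects with -EINVAL.
 */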
static __rte_always_inline int
cpt_fill_modex_params(struct cpt_asym_sess_misc *sess,
struct rte_crypto_asym_xform *xform)
{
struct rte_crypto_modex_xform *ctx = &sess->mod_ctx;
size_t exp_len = xform->modex.exponent.length;
size_t mod_len = xform->modex.modulus.length;
uint8_t *exp = xform->modex.exponent.data;
uint8_t *mod = xform->modex.modulus.data;
cpt_modex_param_normalize(&mod, &mod_len);
cpt_modex_param_normalize(&exp, &exp_len);
if (unlikely(exp_len == 0 || mod_len == 0))
return -EINVAL;
if (unlikely(exp_len > mod_len)) {
CPT_LOG_DP_ERR("Exponent length greater than modulus length is not supported");
return -ENOTSUP;
}
/* Allocate buffer to hold modexp params */
ctx->modulus.data = rte_malloc(NULL, mod_len + exp_len, 0);
if (ctx->modulus.data == NULL) {
CPT_LOG_DP_ERR("Could not allocate buffer for modex params");
return -ENOMEM;
}
/* Set up modexp prime modulus and private exponent */
memcpy(ctx->modulus.data, mod, mod_len);
ctx->exponent.data = ctx->modulus.data + mod_len;
memcpy(ctx->exponent.data, exp, exp_len);
ctx->modulus.length = mod_len;
ctx->exponent.length = exp_len;
return 0;
}
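/*
 * Note: cpt_fill_modex_params() packs both parameters into a single
 * rte_malloc() region laid out as [ modulus | exponent ], with
 * exponent.data pointing just past the modulus bytes; freeing
 * modulus.data in cpt_free_asym_session_parameters() therefore
 * releases both.
 */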
static __rte_always_inline int
cpt_fill_rsa_params(struct cpt_asym_sess_misc *sess,
struct rte_crypto_asym_xform *xform)
{
struct rte_crypto_rsa_priv_key_qt qt = xform->rsa.qt;
struct rte_crypto_rsa_xform *xfrm_rsa = &xform->rsa;
struct rte_crypto_rsa_xform *rsa = &sess->rsa_ctx;
size_t mod_len = xfrm_rsa->n.length;
size_t exp_len = xfrm_rsa->e.length;
uint64_t total_size;
size_t len = 0;
/* Make sure key length used is not more than mod_len/2 */
if (qt.p.data != NULL)
len = (((mod_len / 2) < qt.p.length) ? len : qt.p.length);
/* Total size required for RSA key params (n, e, (q, dQ, p, dP, qInv)) */
total_size = mod_len + exp_len + 5 * len;
/* Allocate buffer to hold all RSA keys */
rsa->n.data = rte_malloc(NULL, total_size, 0);
if (rsa->n.data == NULL) {
CPT_LOG_DP_ERR("Could not allocate buffer for RSA keys");
return -ENOMEM;
}
/* Set up RSA prime modulus and public key exponent */
memcpy(rsa->n.data, xfrm_rsa->n.data, mod_len);
rsa->e.data = rsa->n.data + mod_len;
memcpy(rsa->e.data, xfrm_rsa->e.data, exp_len);
/* Private key in quintuple format */
if (len != 0) {
rsa->qt.q.data = rsa->e.data + exp_len;
memcpy(rsa->qt.q.data, qt.q.data, qt.q.length);
rsa->qt.dQ.data = rsa->qt.q.data + qt.q.length;
memcpy(rsa->qt.dQ.data, qt.dQ.data, qt.dQ.length);
rsa->qt.p.data = rsa->qt.dQ.data + qt.dQ.length;
memcpy(rsa->qt.p.data, qt.p.data, qt.p.length);
rsa->qt.dP.data = rsa->qt.p.data + qt.p.length;
memcpy(rsa->qt.dP.data, qt.dP.data, qt.dP.length);
rsa->qt.qInv.data = rsa->qt.dP.data + qt.dP.length;
memcpy(rsa->qt.qInv.data, qt.qInv.data, qt.qInv.length);
rsa->qt.q.length = qt.q.length;
rsa->qt.dQ.length = qt.dQ.length;
rsa->qt.p.length = qt.p.length;
rsa->qt.dP.length = qt.dP.length;
rsa->qt.qInv.length = qt.qInv.length;
}
rsa->n.length = mod_len;
rsa->e.length = exp_len;
return 0;
}
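/*
 * Note: cpt_fill_rsa_params() packs the key material into a single
 * rte_malloc() region laid out as [ n | e | q | dQ | p | dP | qInv ].
 * The quintuple part is copied only when a private key is present and
 * p fits within mod_len/2 (len != 0); freeing n.data releases the
 * whole region.
 */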
static __rte_always_inline int
cpt_fill_asym_session_parameters(struct cpt_asym_sess_misc *sess,
struct rte_crypto_asym_xform *xform)
{
int ret;
sess->xfrm_type = xform->xform_type;
switch (xform->xform_type) {
case RTE_CRYPTO_ASYM_XFORM_RSA:
ret = cpt_fill_rsa_params(sess, xform);
break;
case RTE_CRYPTO_ASYM_XFORM_MODEX:
ret = cpt_fill_modex_params(sess, xform);
break;
default:
CPT_LOG_DP_ERR("Unsupported transform type");
return -ENOTSUP;
}
return ret;
}
static __rte_always_inline void
cpt_free_asym_session_parameters(struct cpt_asym_sess_misc *sess)
{
struct rte_crypto_modex_xform *mod;
struct rte_crypto_rsa_xform *rsa;
switch (sess->xfrm_type) {
case RTE_CRYPTO_ASYM_XFORM_RSA:
rsa = &sess->rsa_ctx;
if (rsa->n.data)
rte_free(rsa->n.data);
break;
case RTE_CRYPTO_ASYM_XFORM_MODEX:
mod = &sess->mod_ctx;
if (mod->modulus.data)
rte_free(mod->modulus.data);
break;
default:
CPT_LOG_DP_ERR("Invalid transform type");
break;
}
}
#endif /* _CPT_UCODE_ASYM_H_ */
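
As a companion to cpt_fill_rsa_params(), a hedged application-side sketch of
the xform it consumes. The helper name, the rsa_qt_key container and all byte
arrays are placeholders; the key_type enumerator that selects the quintuple
form is left out because its spelling differs across DPDK releases.

#include <string.h>
#include <rte_crypto_asym.h>

/* Placeholder container for externally supplied RSA key material. */
struct rsa_qt_key {
	uint8_t *n, *e, *p, *q, *dp, *dq, *qinv;
	size_t n_len, e_len, p_len, q_len, dp_len, dq_len, qinv_len;
};

static void
build_rsa_qt_xform(struct rte_crypto_asym_xform *xform,
		   const struct rsa_qt_key *key)
{
	memset(xform, 0, sizeof(*xform));
	xform->xform_type = RTE_CRYPTO_ASYM_XFORM_RSA;
	xform->rsa.n.data = key->n;
	xform->rsa.n.length = key->n_len;
	xform->rsa.e.data = key->e;
	xform->rsa.e.length = key->e_len;
	/* Quintuple (CRT) private key; the driver copies it only when
	 * p_len <= n_len / 2, as enforced in cpt_fill_rsa_params().
	 */
	xform->rsa.qt.p.data = key->p;
	xform->rsa.qt.p.length = key->p_len;
	xform->rsa.qt.q.data = key->q;
	xform->rsa.qt.q.length = key->q_len;
	xform->rsa.qt.dP.data = key->dp;
	xform->rsa.qt.dP.length = key->dp_len;
	xform->rsa.qt.dQ.data = key->dq;
	xform->rsa.qt.dQ.length = key->dq_len;
	xform->rsa.qt.qInv.data = key->qinv;
	xform->rsa.qt.qInv.length = key->qinv_len;
}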

drivers/crypto/octeontx/otx_cryptodev_capabilities.c

@@ -6,7 +6,7 @@
#include "otx_cryptodev_capabilities.h"
static const struct rte_cryptodev_capabilities otx_capabilities[] = {
static const struct rte_cryptodev_capabilities otx_sym_capabilities[] = {
/* Symmetric capabilities */
{ /* NULL (AUTH) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -597,8 +597,49 @@ static const struct rte_cryptodev_capabilities otx_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
static const struct rte_cryptodev_capabilities otx_asym_capabilities[] = {
/* Asymmetric capabilities */
{ /* RSA */
.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
{.asym = {
.xform_capa = {
.xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
.op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
{.modlen = {
.min = 17,
.max = 1024,
.increment = 1
}, }
}
}, }
},
{ /* MOD_EXP */
.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
{.asym = {
.xform_capa = {
.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
.op_types = 0,
{.modlen = {
.min = 17,
.max = 1024,
.increment = 1
}, }
}
}, }
},
/* End of asymmetric capabilities */
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
const struct rte_cryptodev_capabilities *
otx_get_capabilities(void)
otx_get_capabilities(uint64_t flags)
{
return otx_capabilities;
if (flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
return otx_asym_capabilities;
else
return otx_sym_capabilities;
}
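
A hedged sketch of how an application can consume the capability table above,
assuming the asymmetric capability query API of this DPDK era
(rte_cryptodev_asym_capability_get() and
rte_cryptodev_asym_xform_capability_check_modlen()); `dev_id` is a placeholder.

#include <rte_cryptodev.h>
#include <rte_crypto_asym.h>

/* Returns non-zero when RSA is advertised and modlen falls inside the
 * modlen range exported in otx_asym_capabilities[].
 */
static int
otx_rsa_modlen_ok(uint8_t dev_id, uint16_t modlen)
{
	struct rte_cryptodev_asym_capability_idx idx = {
		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
	};
	const struct rte_cryptodev_asymmetric_xform_capability *capa;

	capa = rte_cryptodev_asym_capability_get(dev_id, &idx);
	if (capa == NULL)
		return 0;

	/* 0 means the length is within the advertised min/max/increment */
	return rte_cryptodev_asym_xform_capability_check_modlen(capa,
								modlen) == 0;
}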

drivers/crypto/octeontx/otx_cryptodev_capabilities.h

@@ -8,10 +8,9 @@
#include <rte_cryptodev.h>
/*
* Get capabilities list for the device
*
* Get capabilities list for the device, based on device type
*/
const struct rte_cryptodev_capabilities *
otx_get_capabilities(void);
otx_get_capabilities(uint64_t flags);
#endif /* _OTX_CRYPTODEV_CAPABILITIES_H_ */

drivers/crypto/octeontx/otx_cryptodev_ops.c

@@ -18,6 +18,7 @@
#include "cpt_pmd_logs.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"
/* Forward declarations */
@@ -105,7 +106,7 @@ otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
if (info != NULL) {
info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
info->feature_flags = dev->feature_flags;
info->capabilities = otx_get_capabilities();
info->capabilities = otx_get_capabilities(info->feature_flags);
info->sym.max_nb_sessions = 0;
info->driver_id = otx_cryptodev_driver_id;
info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
@@ -285,6 +286,65 @@ otx_cpt_session_clear(struct rte_cryptodev *dev,
}
}
static unsigned int
otx_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct cpt_asym_sess_misc);
}
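/*
 * Note: this size is what applications see through
 * rte_cryptodev_asym_get_private_session_size(); the mempool handed to
 * session configure must therefore have elements of at least this size,
 * since otx_cpt_asym_session_cfg() below draws the private data from it.
 */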
static int
otx_cpt_asym_session_cfg(struct rte_cryptodev *dev,
struct rte_crypto_asym_xform *xform __rte_unused,
struct rte_cryptodev_asym_session *sess,
struct rte_mempool *pool)
{
struct cpt_asym_sess_misc *priv;
int ret;
CPT_PMD_INIT_FUNC_TRACE();
if (rte_mempool_get(pool, (void **)&priv)) {
CPT_LOG_ERR("Could not allocate session private data");
return -ENOMEM;
}
memset(priv, 0, sizeof(struct cpt_asym_sess_misc));
ret = cpt_fill_asym_session_parameters(priv, xform);
if (ret) {
CPT_LOG_ERR("Could not configure session parameters");
/* Return session to mempool */
rte_mempool_put(pool, priv);
return ret;
}
set_asym_session_private_data(sess, dev->driver_id, priv);
return 0;
}
static void
otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_asym_session *sess)
{
struct cpt_asym_sess_misc *priv;
struct rte_mempool *sess_mp;
CPT_PMD_INIT_FUNC_TRACE();
priv = get_asym_session_private_data(sess, dev->driver_id);
if (priv == NULL)
return;
/* Free resources allocated during session configure */
cpt_free_asym_session_parameters(priv);
memset(priv, 0, otx_cpt_asym_session_size_get(dev));
sess_mp = rte_mempool_from_obj(priv);
set_asym_session_private_data(sess, dev->driver_id, NULL);
rte_mempool_put(sess_mp, priv);
}
static __rte_always_inline int32_t __hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
@@ -584,7 +644,11 @@ static struct rte_cryptodev_ops cptvf_ops = {
/* Crypto related operations */
.sym_session_get_size = otx_cpt_get_session_size,
.sym_session_configure = otx_cpt_session_cfg,
.sym_session_clear = otx_cpt_session_clear
.sym_session_clear = otx_cpt_session_clear,
.asym_session_get_size = otx_cpt_asym_session_size_get,
.asym_session_configure = otx_cpt_asym_session_cfg,
.asym_session_clear = otx_cpt_asym_session_clear,
};
int
@@ -635,7 +699,8 @@ otx_cpt_dev_create(struct rte_cryptodev *c_dev)
case OTX_CPT_VF_TYPE_AE:
/* Set asymmetric cpt feature flags */
c_dev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED;
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
break;
case OTX_CPT_VF_TYPE_SE:
/* Set symmetric cpt feature flags */