crypto/nitrox: support AES-GCM

This patch adds support for the AES-GCM AEAD algorithm.

Signed-off-by: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
Author: Nagadheeraj Rottela <rnagadheeraj@marvell.com>, 2020-10-09 11:27:24 +05:30
Committed by: Akhil Goyal
parent 4edede7bc6
commit 93ba4a6e17
7 changed files with 272 additions and 38 deletions
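
For orientation, a minimal sketch of the application-side AEAD transform that reaches the new code path (the key buffer, IV_OFFSET and the size choices are illustrative; the valid ranges come from the capability entry added in nitrox_sym_capabilities.c below):

    struct rte_crypto_sym_xform xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .aead = {
            .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
            .algo = RTE_CRYPTO_AEAD_AES_GCM,
            .key = { .data = key, .length = 16 },        /* 16, 24 or 32 */
            .iv = { .offset = IV_OFFSET, .length = 16 }, /* 12 or 16 */
            .digest_length = 16,                         /* 1..16 */
            .aad_length = 16,                            /* 0..512 */
        },
    };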

doc/guides/cryptodevs/features/nitrox.ini

@@ -34,6 +34,9 @@ SHA256 HMAC = Y
; Supported AEAD algorithms of the 'nitrox' crypto driver.
;
[AEAD]
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
;
; Supported Asymmetric algorithms of the 'nitrox' crypto driver.

doc/guides/cryptodevs/nitrox.rst

@@ -26,6 +26,10 @@ Hash algorithms:
* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
* ``RTE_CRYPTO_AUTH_SHA256_HMAC``

Supported AEAD algorithms:

* ``RTE_CRYPTO_AEAD_AES_GCM``

Limitations
-----------

doc/guides/rel_notes/release_20_11.rst

@@ -156,6 +156,10 @@ New Features
  * Added support for AES-ECB 128, 192 and 256 in aesni_mb PMD.

* **Updated Marvell NITROX symmetric crypto PMD.**

  * Added AES-GCM support.

* **Updated the OCTEON TX2 crypto PMD.**

  * Updated the OCTEON TX2 crypto PMD lookaside protocol offload for IPsec with

drivers/crypto/nitrox/nitrox_sym.c

@@ -20,6 +20,7 @@
#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
#define FLEXI_CRYPTO_MAX_AAD_LEN 512
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
@@ -297,6 +298,9 @@ get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
}
}
break;
case RTE_CRYPTO_SYM_XFORM_AEAD:
res = NITROX_CHAIN_COMBINED;
break;
default:
break;
}
@@ -431,17 +435,17 @@ get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
}
static bool
-auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
-struct flexi_crypto_context *fctx)
+auth_key_is_valid(const uint8_t *data, uint16_t length,
+struct flexi_crypto_context *fctx)
{
-if (unlikely(!xform->key.data && xform->key.length)) {
+if (unlikely(!data && length)) {
NITROX_LOG(ERR, "Invalid auth key\n");
return false;
}
-if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
+if (unlikely(length > sizeof(fctx->auth.opad))) {
NITROX_LOG(ERR, "Invalid auth key length %d\n",
-xform->key.length);
+length);
return false;
}
@@ -459,11 +463,10 @@ configure_auth_ctx(struct rte_crypto_auth_xform *xform,
if (unlikely(type == AUTH_INVALID))
return -ENOTSUP;
-if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
+if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
+fctx)))
return -EINVAL;
-ctx->auth_op = xform->op;
-ctx->auth_algo = xform->algo;
ctx->digest_length = xform->digest_length;
fctx->flags = rte_be_to_cpu_64(fctx->flags);
@@ -476,6 +479,56 @@ configure_auth_ctx(struct rte_crypto_auth_xform *xform,
return 0;
}
static int
configure_aead_ctx(struct rte_crypto_aead_xform *xform,
struct nitrox_crypto_ctx *ctx)
{
int aes_keylen;
struct flexi_crypto_context *fctx = &ctx->fctx;
if (unlikely(xform->aad_length > FLEXI_CRYPTO_MAX_AAD_LEN)) {
NITROX_LOG(ERR, "AAD length %d not supported\n",
xform->aad_length);
return -ENOTSUP;
}
if (unlikely(xform->algo != RTE_CRYPTO_AEAD_AES_GCM))
return -ENOTSUP;
aes_keylen = flexi_aes_keylen(xform->key.length, true);
if (unlikely(aes_keylen < 0))
return -EINVAL;
if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
fctx)))
return -EINVAL;
if (unlikely(xform->iv.length > MAX_IV_LEN))
return -EINVAL;
fctx->flags = rte_be_to_cpu_64(fctx->flags);
fctx->w0.cipher_type = CIPHER_AES_GCM;
fctx->w0.aes_keylen = aes_keylen;
fctx->w0.iv_source = IV_FROM_DPTR;
fctx->w0.hash_type = AUTH_NULL;
fctx->w0.auth_input_type = 1;
fctx->w0.mac_len = xform->digest_length;
fctx->flags = rte_cpu_to_be_64(fctx->flags);
memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
memcpy(fctx->crypto.key, xform->key.data, xform->key.length);
memset(&fctx->auth, 0, sizeof(fctx->auth));
memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
ctx->req_op = (xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
ctx->iv.offset = xform->iv.offset;
ctx->iv.length = xform->iv.length;
ctx->digest_length = xform->digest_length;
ctx->aad_length = xform->aad_length;
return 0;
}
static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
struct rte_crypto_sym_xform *xform,
@@ -486,6 +539,8 @@ nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
struct nitrox_crypto_ctx *ctx;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
struct rte_crypto_auth_xform *auth_xform = NULL;
struct rte_crypto_aead_xform *aead_xform = NULL;
int ret = -EINVAL;
if (rte_mempool_get(mempool, &mp_obj)) {
NITROX_LOG(ERR, "Couldn't allocate context\n");
@@ -503,8 +558,12 @@ nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
auth_xform = &xform->auth;
cipher_xform = &xform->next->cipher;
break;
case NITROX_CHAIN_COMBINED:
aead_xform = &xform->aead;
break;
default:
NITROX_LOG(ERR, "Crypto chain not supported\n");
ret = -ENOTSUP;
goto err;
}
@@ -518,12 +577,17 @@ nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
goto err;
}
if (aead_xform && unlikely(configure_aead_ctx(aead_xform, ctx))) {
NITROX_LOG(ERR, "Failed to configure aead ctx\n");
goto err;
}
ctx->iova = rte_mempool_virt2iova(ctx);
set_sym_session_private_data(sess, cdev->driver_id, ctx);
return 0;
err:
rte_mempool_put(mempool, mp_obj);
-return -EINVAL;
+return ret;
}
static void
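
The NITROX_CHAIN_COMBINED branch above is reached through the regular two-step session API of this release; a hedged sketch, assuming a probed nitrox device dev_id, the xform from the sketch at the top, and application-created mempools sess_mp/sess_priv_mp:

    struct rte_cryptodev_sym_session *sess;

    sess = rte_cryptodev_sym_session_create(sess_mp);
    if (sess == NULL)
        return -ENOMEM;

    /* Lands in nitrox_sym_dev_sess_configure(); fails for e.g.
     * aad_length > 512 or a non-GCM AEAD algo (-ENOTSUP is now
     * returned for unsupported chains). */
    if (rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp) != 0)
        return -EINVAL;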

drivers/crypto/nitrox/nitrox_sym_capabilities.c

@@ -108,6 +108,36 @@ static const struct rte_cryptodev_capabilities nitrox_capabilities[] = {
}, }
}, }
},
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
{.aead = {
.algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.digest_size = {
.min = 1,
.max = 16,
.increment = 1
},
.aad_size = {
.min = 0,
.max = 512,
.increment = 1
},
.iv_size = {
.min = 12,
.max = 16,
.increment = 4
},
}, }
}, }
},
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
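
Applications can check these bounds at runtime before building the transform; a small sketch using the generic capability helpers (dev_id assumed to be a probed nitrox device):

    const struct rte_cryptodev_symmetric_capability *cap;
    struct rte_cryptodev_sym_capability_idx idx = {
        .type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
    };

    cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
    if (cap == NULL ||
        rte_cryptodev_sym_capability_check_aead(cap, 16 /* key */,
                                                16 /* digest */,
                                                16 /* aad */,
                                                12 /* iv */) != 0)
        return -ENOTSUP;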

drivers/crypto/nitrox/nitrox_sym_ctx.h

@@ -11,6 +11,7 @@
#define AES_MAX_KEY_SIZE 32
#define AES_BLOCK_SIZE 16
#define AES_GCM_SALT_SIZE 4
enum nitrox_chain {
NITROX_CHAIN_CIPHER_ONLY,
@@ -69,14 +70,14 @@ struct flexi_crypto_context {
struct nitrox_crypto_ctx {
struct flexi_crypto_context fctx;
enum nitrox_chain nitrox_chain;
-enum rte_crypto_auth_operation auth_op;
-enum rte_crypto_auth_algorithm auth_algo;
struct {
uint16_t offset;
uint16_t length;
} iv;
rte_iova_t iova;
+uint8_t salt[AES_GCM_SALT_SIZE];
uint16_t digest_length;
+uint16_t aad_length;
uint8_t opcode;
uint8_t req_op;
};

drivers/crypto/nitrox/nitrox_sym_reqmgr.c

@@ -238,12 +238,13 @@ create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
}
static void
-softreq_copy_iv(struct nitrox_softreq *sr)
+softreq_copy_iv(struct nitrox_softreq *sr, uint8_t salt_size)
{
-sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
-sr->ctx->iv.offset);
-sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, sr->ctx->iv.offset);
-sr->iv.len = sr->ctx->iv.length;
+uint16_t offset = sr->ctx->iv.offset + salt_size;
+sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *, offset);
+sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, offset);
+sr->iv.len = sr->ctx->iv.length - salt_size;
}
static int
@@ -254,7 +255,7 @@ extract_cipher_auth_digest(struct nitrox_softreq *sr,
struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
op->sym->m_src;
-if (sr->ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
unlikely(!op->sym->auth.digest.data))
return -EINVAL;
@@ -352,6 +353,13 @@ create_cipher_auth_sglist(struct nitrox_softreq *sr,
if (unlikely(auth_only_len < 0))
return -EINVAL;
if (unlikely(
op->sym->cipher.data.offset + op->sym->cipher.data.length !=
op->sym->auth.data.offset + op->sym->auth.data.length)) {
NITROX_LOG(ERR, "Auth only data after cipher data not supported\n");
return -ENOTSUP;
}
err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
auth_only_len);
if (unlikely(err))
@@ -365,6 +373,41 @@ create_cipher_auth_sglist(struct nitrox_softreq *sr,
return 0;
}
static int
create_combined_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
struct rte_mbuf *mbuf)
{
struct rte_crypto_op *op = sr->op;
fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
fill_sglist(sgtbl, sr->ctx->aad_length, op->sym->aead.aad.phys_addr,
op->sym->aead.aad.data);
return create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
op->sym->cipher.data.length);
}
static int
create_aead_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
struct rte_mbuf *mbuf)
{
int err;
switch (sr->ctx->nitrox_chain) {
case NITROX_CHAIN_CIPHER_AUTH:
case NITROX_CHAIN_AUTH_CIPHER:
err = create_cipher_auth_sglist(sr, sgtbl, mbuf);
break;
case NITROX_CHAIN_COMBINED:
err = create_combined_sglist(sr, sgtbl, mbuf);
break;
default:
err = -EINVAL;
break;
}
return err;
}
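
Note that create_combined_sglist() reads op->sym->cipher.data even for an AEAD op; cipher.data and aead.data overlay each other in the rte_crypto_sym_op union, so they alias here. The input gather list this builds for the combined case is, roughly:

    /* Input SGL for NITROX_CHAIN_COMBINED, as assembled above:
     *
     *   [ IV (iv.len) ][ AAD (aad_length) ][ payload (cipher.data.length) ]
     *
     * create_aead_inbuf() below appends the digest (digest_length) when
     * req_op == NITROX_OP_DECRYPT so that the hardware can verify the tag.
     */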
static void
create_sgcomp(struct nitrox_sgtable *sgtbl)
{
@@ -383,17 +426,16 @@ create_sgcomp(struct nitrox_sgtable *sgtbl)
}
static int
-create_cipher_auth_inbuf(struct nitrox_softreq *sr,
-struct nitrox_sglist *digest)
+create_aead_inbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
int err;
struct nitrox_crypto_ctx *ctx = sr->ctx;
-err = create_cipher_auth_sglist(sr, &sr->in, sr->op->sym->m_src);
+err = create_aead_sglist(sr, &sr->in, sr->op->sym->m_src);
if (unlikely(err))
return err;
-if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
+if (ctx->req_op == NITROX_OP_DECRYPT)
fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);
create_sgcomp(&sr->in);
@@ -402,25 +444,24 @@ create_cipher_auth_inbuf(struct nitrox_softreq *sr,
}
static int
-create_cipher_auth_oop_outbuf(struct nitrox_softreq *sr,
-struct nitrox_sglist *digest)
+create_aead_oop_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
int err;
struct nitrox_crypto_ctx *ctx = sr->ctx;
-err = create_cipher_auth_sglist(sr, &sr->out, sr->op->sym->m_dst);
+err = create_aead_sglist(sr, &sr->out, sr->op->sym->m_dst);
if (unlikely(err))
return err;
-if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+if (ctx->req_op == NITROX_OP_ENCRYPT)
fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);
return 0;
}
static void
-create_cipher_auth_inplace_outbuf(struct nitrox_softreq *sr,
-struct nitrox_sglist *digest)
+create_aead_inplace_outbuf(struct nitrox_softreq *sr,
+struct nitrox_sglist *digest)
{
int i, cnt;
struct nitrox_crypto_ctx *ctx = sr->ctx;
@@ -433,17 +474,16 @@ create_cipher_auth_inplace_outbuf(struct nitrox_softreq *sr,
}
sr->out.map_bufs_cnt = cnt;
-if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+if (ctx->req_op == NITROX_OP_ENCRYPT) {
fill_sglist(&sr->out, digest->len, digest->iova,
digest->virt);
-} else if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+} else if (ctx->req_op == NITROX_OP_DECRYPT) {
sr->out.map_bufs_cnt--;
}
}
static int
-create_cipher_auth_outbuf(struct nitrox_softreq *sr,
-struct nitrox_sglist *digest)
+create_aead_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
struct rte_crypto_op *op = sr->op;
int cnt = 0;
@@ -458,11 +498,11 @@ create_cipher_auth_outbuf(struct nitrox_softreq *sr,
if (op->sym->m_dst) {
int err;
-err = create_cipher_auth_oop_outbuf(sr, digest);
+err = create_aead_oop_outbuf(sr, digest);
if (unlikely(err))
return err;
} else {
-create_cipher_auth_inplace_outbuf(sr, digest);
+create_aead_inplace_outbuf(sr, digest);
}
cnt = sr->out.map_bufs_cnt;
@@ -516,16 +556,16 @@ process_cipher_auth_data(struct nitrox_softreq *sr)
int err;
struct nitrox_sglist digest;
-softreq_copy_iv(sr);
+softreq_copy_iv(sr, 0);
err = extract_cipher_auth_digest(sr, &digest);
if (unlikely(err))
return err;
-err = create_cipher_auth_inbuf(sr, &digest);
+err = create_aead_inbuf(sr, &digest);
if (unlikely(err))
return err;
-err = create_cipher_auth_outbuf(sr, &digest);
+err = create_aead_outbuf(sr, &digest);
if (unlikely(err))
return err;
@@ -534,6 +574,86 @@ process_cipher_auth_data(struct nitrox_softreq *sr)
return 0;
}
static int
softreq_copy_salt(struct nitrox_softreq *sr)
{
struct nitrox_crypto_ctx *ctx = sr->ctx;
uint8_t *addr;
if (unlikely(ctx->iv.length < AES_GCM_SALT_SIZE)) {
NITROX_LOG(ERR, "Invalid IV length %d\n", ctx->iv.length);
return -EINVAL;
}
addr = rte_crypto_op_ctod_offset(sr->op, uint8_t *, ctx->iv.offset);
if (!memcmp(ctx->salt, addr, AES_GCM_SALT_SIZE))
return 0;
memcpy(ctx->salt, addr, AES_GCM_SALT_SIZE);
memcpy(ctx->fctx.crypto.iv, addr, AES_GCM_SALT_SIZE);
return 0;
}
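
For GCM the session-time IV region is thus split: the first four bytes are a salt folded once into the flexi context, and only the remainder is sent with each request via DPTR. A sketch of the per-op layout this expects, assuming iv.length = 16 was given at session setup (IV_OFFSET and nonce are illustrative):

    /* iv.offset + 0..3  : salt -> ctx->salt and fctx.crypto.iv (copied once)
     * iv.offset + 4..15 : IV   -> sr->iv, placed in the input gather list */
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

    memcpy(iv_ptr, nonce, 16);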
static int
extract_combined_digest(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
struct rte_crypto_op *op = sr->op;
struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
op->sym->m_src;
digest->len = sr->ctx->digest_length;
if (op->sym->aead.digest.data) {
digest->iova = op->sym->aead.digest.phys_addr;
digest->virt = op->sym->aead.digest.data;
return 0;
}
if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->aead.data.offset +
op->sym->aead.data.length + digest->len))
return -EINVAL;
digest->iova = rte_pktmbuf_iova_offset(mdst,
op->sym->aead.data.offset +
op->sym->aead.data.length);
digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
op->sym->aead.data.offset +
op->sym->aead.data.length);
return 0;
}
static int
process_combined_data(struct nitrox_softreq *sr)
{
int err;
struct nitrox_sglist digest;
struct rte_crypto_op *op = sr->op;
err = softreq_copy_salt(sr);
if (unlikely(err))
return err;
softreq_copy_iv(sr, AES_GCM_SALT_SIZE);
err = extract_combined_digest(sr, &digest);
if (unlikely(err))
return err;
err = create_aead_inbuf(sr, &digest);
if (unlikely(err))
return err;
err = create_aead_outbuf(sr, &digest);
if (unlikely(err))
return err;
create_aead_gph(op->sym->aead.data.length, sr->iv.len,
op->sym->aead.data.length + sr->ctx->aad_length,
&sr->gph);
return 0;
}
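
An operation exercising this path fills the aead fields of the symmetric op before enqueue; a minimal sketch (mbuf, AAD and digest buffers, dev_id, qp_id and sess are assumed to be set up by the application):

    op->sym->m_src = mbuf;
    op->sym->aead.data.offset = 0;
    op->sym->aead.data.length = plaintext_len;
    op->sym->aead.aad.data = aad_vaddr;
    op->sym->aead.aad.phys_addr = aad_iova;
    /* digest.data may be left NULL: extract_combined_digest() then takes
     * the tag right after aead.data in the destination mbuf. */
    op->sym->aead.digest.data = digest_vaddr;
    op->sym->aead.digest.phys_addr = digest_iova;
    rte_crypto_op_attach_sym_session(op, sess);

    (void)rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1);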
static int
process_softreq(struct nitrox_softreq *sr)
{
@@ -545,6 +665,9 @@ process_softreq(struct nitrox_softreq *sr)
case NITROX_CHAIN_AUTH_CIPHER:
err = process_cipher_auth_data(sr);
break;
case NITROX_CHAIN_COMBINED:
err = process_combined_data(sr);
break;
default:
err = -EINVAL;
break;
@@ -558,10 +681,15 @@ nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
struct nitrox_crypto_ctx *ctx,
struct nitrox_softreq *sr)
{
int err;
softreq_init(sr, sr->iova);
sr->ctx = ctx;
sr->op = op;
-process_softreq(sr);
+err = process_softreq(sr);
+if (unlikely(err))
+return err;
create_se_instr(sr, qno);
sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
return 0;
@@ -577,7 +705,7 @@ nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
cc = *(volatile uint64_t *)(&sr->resp.completion);
orh = *(volatile uint64_t *)(&sr->resp.orh);
if (cc != PENDING_SIG)
-err = 0;
+err = orh & 0xff;
else if ((orh != PENDING_SIG) && (orh & 0xff))
err = orh & 0xff;
else if (rte_get_timer_cycles() >= sr->timeout)