crypto/aesni_gcm: migrate to Multi-buffer library

Since the Intel Multi-Buffer library for IPsec has been updated to
support scatter-gather lists, the AESNI GCM PMD can link
to this library instead of the ISA-L library.

This move eases maintenance of the driver, as it now uses
the same library as the AESNI MB PMD.
It also adds support for 192-bit keys.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
Pablo de Lara <pablo.de.lara.guarch@intel.com> committed 2017-07-04 01:12:40 +01:00
parent b4add175c0
commit 6f16aab09a
10 changed files with 236 additions and 95 deletions
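
For illustration, a minimal sketch (not part of the commit) of what the newly supported 192-bit key looks like at the API level, assuming the DPDK 17.08 AEAD xform fields from rte_crypto_sym.h; key material and the IV offset are placeholders:

    #include <rte_crypto_sym.h>

    /* 24-byte (192-bit) AES key, newly accepted by this PMD */
    static uint8_t key_192[24] = { 0 /* fill with real key material */ };

    static struct rte_crypto_sym_xform gcm_xform = {
            .next = NULL,
            .type = RTE_CRYPTO_SYM_XFORM_AEAD,
            .aead = {
                    .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
                    .algo = RTE_CRYPTO_AEAD_AES_GCM,
                    .key = { .data = key_192, .length = 24 },
                    /* offset is relative to the crypto op; 0 is a placeholder */
                    .iv = { .offset = 0, .length = 12 },
                    .digest_length = 16,
                    .aad_length = 0,
            },
    };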

devtools/test-build.sh

@@ -38,7 +38,6 @@ default_path=$PATH
 # - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
 # - DPDK_DEP_ARCHIVE
 # - DPDK_DEP_CFLAGS
-# - DPDK_DEP_ISAL_CRYPTO (y/[n])
 # - DPDK_DEP_LDFLAGS
 # - DPDK_DEP_MOFED (y/[n])
 # - DPDK_DEP_NUMA ([y]/n)
@@ -121,7 +120,6 @@ reset_env ()
     unset CROSS
     unset DPDK_DEP_ARCHIVE
     unset DPDK_DEP_CFLAGS
-    unset DPDK_DEP_ISAL_CRYPTO
     unset DPDK_DEP_LDFLAGS
     unset DPDK_DEP_MOFED
     unset DPDK_DEP_NUMA
@@ -182,7 +180,7 @@ config () # <directory> <target> <options>
         sed -ri 's,(PMD_ARMV8_CRYPTO=)n,\1y,' $1/.config
     test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
         sed -ri 's,(PMD_AESNI_MB=)n,\1y,' $1/.config
-    test "$DPDK_DEP_ISAL_CRYPTO" != y || \
+    test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
         sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' $1/.config
     test -z "$LIBSSO_SNOW3G_PATH" || \
         sed -ri 's,(PMD_SNOW3G=)n,\1y,' $1/.config

doc/guides/cryptodevs/aesni_gcm.rst

@@ -1,5 +1,5 @@
 ..  BSD LICENSE
-    Copyright(c) 2016 Intel Corporation. All rights reserved.
+    Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 
     Redistribution and use in source and binary forms, with or without
     modification, are permitted provided that the following conditions
@@ -32,8 +32,8 @@ AES-NI GCM Crypto Poll Mode Driver
 
 The AES-NI GCM PMD (**librte_pmd_aesni_gcm**) provides poll mode crypto driver
-support for utilizing Intel ISA-L crypto library, which provides operation acceleration
-through the AES-NI instruction sets for AES-GCM authenticated cipher algorithm.
+support for utilizing Intel multi buffer library (see AES-NI Multi-buffer PMD documentation
+to learn more about it, including installation).
 
 Features
 --------
@@ -49,19 +49,51 @@ AEAD algorithms:
 
 * RTE_CRYPTO_AEAD_AES_GCM
 
+Limitations
+-----------
+
+* Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
+* Cipher only is not supported.
+
 Installation
 ------------
 
-To build DPDK with the AESNI_GCM_PMD the user is required to install
-the ``libisal_crypto`` library in the build environment.
-For download and more details please visit `<https://github.com/01org/isa-l_crypto>`_.
+To build DPDK with the AESNI_GCM_PMD the user is required to download the multi-buffer
+library from `here <https://github.com/01org/intel-ipsec-mb>`_
+and compile it on their user system before building DPDK.
+The latest version of the library supported by this PMD is v0.46, which
+can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip>`_.
+
+.. code-block:: console
+
+    make
+
+As a reference, the following table shows a mapping between the past DPDK versions
+and the external crypto libraries supported by them:
+
+.. _table_aesni_gcm_versions:
+
+.. table:: DPDK and external crypto library version compatibility
+
+   =============  ================================
+   DPDK version   Crypto library version
+   =============  ================================
+   16.04 - 16.11  Multi-buffer library 0.43 - 0.44
+   17.02 - 17.05  ISA-L Crypto v2.18
+   17.08+         Multi-buffer library 0.46+
+   =============  ================================
 
 Initialization
 --------------
 
 In order to enable this virtual crypto PMD, user must:
 
-* Install the ISA-L crypto library (explained in Installation section).
+* Export the environmental variable AESNI_MULTI_BUFFER_LIB_PATH with the path where
+  the library was extracted.
+
+* Build the multi buffer library (explained in Installation section).
 
 * Set CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y in config/common_base.
@@ -85,10 +117,3 @@ Example:
 
 .. code-block:: console
 
     ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_aesni_gcm,socket_id=1,max_nb_sessions=128"
-
-Limitations
------------
-
-* Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
-* Hash only is not supported.
-* Cipher only is not supported.
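
As a complement to the console example in the documentation above, a hedged sketch of creating the same vdev from application code, assuming rte_vdev_init() and rte_cryptodev_get_dev_id() as present in DPDK of this era:

    #include <rte_vdev.h>
    #include <rte_cryptodev.h>

    static int
    create_gcm_vdev(void)
    {
            /* equivalent of --vdev="crypto_aesni_gcm,socket_id=1,max_nb_sessions=128" */
            int ret = rte_vdev_init("crypto_aesni_gcm",
                            "socket_id=1,max_nb_sessions=128");
            if (ret != 0)
                    return ret;

            /* returns the cryptodev id of the newly created device, or -1 */
            return rte_cryptodev_get_dev_id("crypto_aesni_gcm");
    }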

doc/guides/cryptodevs/features/aesni_gcm.ini

@@ -7,7 +7,9 @@
 Symmetric crypto       = Y
 Sym operation chaining = Y
 CPU AESNI              = Y
+CPU SSE                = Y
+CPU AVX                = Y
+CPU AVX2               = Y
 
 ;
 ; Supported crypto algorithms of the 'aesni_gcm' crypto driver.
 ;

doc/guides/rel_notes/release_17_08.rst

@@ -124,6 +124,14 @@ New Features
 
   * 12-byte IV on AES Counter Mode, apart from the previous 16-byte IV.
 
+* **Updated the AES-NI GCM PMD.**
+
+  The AES-NI GCM PMD was migrated from the ISA-L library to the Multi Buffer
+  library, as the latter library has Scatter Gather List support
+  now. The migration entailed adding additional support for:
+
+  * 192-bit key.
+
 
 Resolved Issues
 ---------------

drivers/crypto/aesni_gcm/Makefile

@@ -1,6 +1,6 @@
 #   BSD LICENSE
 #
-#   Copyright(c) 2016 Intel Corporation. All rights reserved.
+#   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 #
 #   Redistribution and use in source and binary forms, with or without
 #   modification, are permitted provided that the following conditions
@@ -31,6 +31,9 @@
 include $(RTE_SDK)/mk/rte.vars.mk
 
 ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
 endif
 
 # library name
@@ -47,7 +50,9 @@ LIBABIVER := 1
 EXPORT_MAP := rte_pmd_aesni_gcm_version.map
 
 # external library dependencies
-LDLIBS += -lisal_crypto
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c

drivers/crypto/aesni_gcm/aesni_gcm_ops.h

@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -37,26 +37,109 @@
 #define LINUX
 #endif
 
-#include <isa-l_crypto/aes_gcm.h>
+#include <gcm_defines.h>
+#include <aux_funcs.h>
 
-typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
-		uint8_t *iv,
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+	RTE_AESNI_GCM_SSE,
+	RTE_AESNI_GCM_AVX,
+	RTE_AESNI_GCM_AVX2,
+	RTE_AESNI_GCM_VECTOR_NUM
+};
+
+enum aesni_gcm_key {
+	AESNI_GCM_KEY_128,
+	AESNI_GCM_KEY_192,
+	AESNI_GCM_KEY_256,
+	AESNI_GCM_KEY_NUM
+};
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data, uint8_t *out,
+		const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
+		const uint8_t *aad, uint64_t aad_len,
+		uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
 		uint8_t const *aad,
 		uint64_t aad_len);
 
-typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
 		uint8_t *out,
 		const uint8_t *in,
 		uint64_t plaintext_len);
 
-typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+/** GCM library function pointer table */
 struct aesni_gcm_ops {
+	aesni_gcm_t enc;	/**< GCM encode function pointer */
+	aesni_gcm_t dec;	/**< GCM decode function pointer */
+	aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
 	aesni_gcm_init_t init;
-	aesni_gcm_update_t update;
+	aesni_gcm_update_t update_enc;
+	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize;
 };
 
+#define AES_GCM_FN(keylen, arch) \
+aes_gcm_enc_##keylen##_##arch,\
+aes_gcm_dec_##keylen##_##arch,\
+aes_gcm_pre_##keylen##_##arch,\
+aes_gcm_init_##keylen##_##arch,\
+aes_gcm_enc_##keylen##_update_##arch,\
+aes_gcm_dec_##keylen##_update_##arch,\
+aes_gcm_enc_##keylen##_finalize_##arch,
+
+static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
+	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
+		[AESNI_GCM_KEY_128] = {NULL},
+		[AESNI_GCM_KEY_192] = {NULL},
+		[AESNI_GCM_KEY_256] = {NULL}
+	},
+	[RTE_AESNI_GCM_SSE] = {
+		[AESNI_GCM_KEY_128] = {
+			AES_GCM_FN(128, sse)
+		},
+		[AESNI_GCM_KEY_192] = {
+			AES_GCM_FN(192, sse)
+		},
+		[AESNI_GCM_KEY_256] = {
+			AES_GCM_FN(256, sse)
+		}
+	},
+	[RTE_AESNI_GCM_AVX] = {
+		[AESNI_GCM_KEY_128] = {
+			AES_GCM_FN(128, avx_gen2)
+		},
+		[AESNI_GCM_KEY_192] = {
+			AES_GCM_FN(192, avx_gen2)
+		},
+		[AESNI_GCM_KEY_256] = {
+			AES_GCM_FN(256, avx_gen2)
+		}
+	},
+	[RTE_AESNI_GCM_AVX2] = {
+		[AESNI_GCM_KEY_128] = {
+			AES_GCM_FN(128, avx_gen4)
+		},
+		[AESNI_GCM_KEY_192] = {
+			AES_GCM_FN(192, avx_gen4)
+		},
+		[AESNI_GCM_KEY_256] = {
+			AES_GCM_FN(256, avx_gen4)
+		}
+	}
+};
+
 #endif /* _AESNI_GCM_OPS_H_ */
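
To make the table's intended use concrete, here is an illustrative sketch (not part of the patch) that drives gcm_ops[][] through the init/update/finalize flow on a message split across two segments, the scatter-gather pattern this migration enables. Buffer contents are dummies; the typedefs and gcm_ops[][] are exactly those defined in this header:

    /* would live in a .c file that includes aesni_gcm_ops.h */
    static void
    example_sgl_flow(void)
    {
            const struct aesni_gcm_ops *ops =
                            &gcm_ops[RTE_AESNI_GCM_SSE][AESNI_GCM_KEY_192];
            struct gcm_key_data key_data;
            struct gcm_context_data ctx;
            uint8_t key[24] = {0};          /* 192-bit key, newly supported */
            uint8_t iv[12] = {0}, aad[8] = {0};
            uint8_t in[64] = {0}, out[64], tag[16];

            ops->precomp(key, &key_data);   /* key expansion, once per session */

            ops->init(&key_data, &ctx, iv, aad, sizeof(aad));
            ops->update_enc(&key_data, &ctx, out, in, 32);            /* segment 1 */
            ops->update_enc(&key_data, &ctx, out + 32, in + 32, 32);  /* segment 2 */
            ops->finalize(&key_data, &ctx, tag, sizeof(tag));
    }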

drivers/crypto/aesni_gcm/aesni_gcm_pmd.c

@@ -43,37 +43,11 @@
 
 #include "aesni_gcm_pmd_private.h"
 
-/** GCM encode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_enc[] = {
-		[AESNI_GCM_KEY_128] = {
-				aesni_gcm128_init,
-				aesni_gcm128_enc_update,
-				aesni_gcm128_enc_finalize
-		},
-		[AESNI_GCM_KEY_256] = {
-				aesni_gcm256_init,
-				aesni_gcm256_enc_update,
-				aesni_gcm256_enc_finalize
-		}
-};
-
-/** GCM decode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_dec[] = {
-		[AESNI_GCM_KEY_128] = {
-				aesni_gcm128_init,
-				aesni_gcm128_dec_update,
-				aesni_gcm128_dec_finalize
-		},
-		[AESNI_GCM_KEY_256] = {
-				aesni_gcm256_init,
-				aesni_gcm256_dec_update,
-				aesni_gcm256_dec_finalize
-		}
-};
-
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_sym_xform *auth_xform;
@@ -145,20 +119,21 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 	/* Check key length and calculate GCM pre-compute. */
 	switch (key_length) {
 	case 16:
-		aesni_gcm128_pre(key, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_128;
+		break;
+	case 24:
+		sess->key = AESNI_GCM_KEY_192;
 		break;
 	case 32:
-		aesni_gcm256_pre(key, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_256;
 		break;
 	default:
 		GCM_LOG_ERR("Unsupported key length");
 		return -EINVAL;
 	}
 
+	gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
 	/* Digest check */
 	if (digest_length != 16 &&
 			digest_length != 12 &&
@@ -193,7 +168,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 		sess = (struct aesni_gcm_session *)
 			((struct rte_cryptodev_sym_session *)_sess)->_private;
 
-		if (unlikely(aesni_gcm_set_session_parameters(sess,
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops, sess,
 				sym_op->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
 			sess = NULL;
@@ -203,8 +178,8 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 }
 
 /**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
  *
  * @param	qp		queue pair
  * @param	op		symmetric crypto operation
@@ -214,7 +189,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
  *
 */
 static int
-process_gcm_crypto_op(struct rte_crypto_op *op,
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		struct aesni_gcm_session *session)
 {
 	uint8_t *src, *dst;
@@ -279,12 +254,14 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 
 	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
 
-		aesni_gcm_enc[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				sym_op->aead.aad.data,
 				(uint64_t)session->aad_length);
 
-		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+		qp->ops[session->key].update_enc(&session->gdata_key,
+				&qp->gdata_ctx, dst, src,
 				(uint64_t)part_len);
 		total_len = data_length - part_len;
 
@@ -298,13 +275,14 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
-			aesni_gcm_enc[session->key].update(&session->gdata,
-					dst, src,
+			qp->ops[session->key].update_enc(&session->gdata_key,
+					&qp->gdata_ctx, dst, src,
 					(uint64_t)part_len);
 			total_len -= part_len;
 		}
 
-		aesni_gcm_enc[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				sym_op->aead.digest.data,
 				(uint64_t)session->digest_length);
 	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
@@ -317,12 +295,14 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 			return -1;
 		}
 
-		aesni_gcm_dec[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				sym_op->aead.aad.data,
 				(uint64_t)session->aad_length);
 
-		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+		qp->ops[session->key].update_dec(&session->gdata_key,
+				&qp->gdata_ctx, dst, src,
 				(uint64_t)part_len);
 		total_len = data_length - part_len;
 
@@ -336,21 +316,25 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
-			aesni_gcm_dec[session->key].update(&session->gdata,
+			qp->ops[session->key].update_dec(&session->gdata_key,
+					&qp->gdata_ctx,
 					dst, src,
 					(uint64_t)part_len);
 			total_len -= part_len;
 		}
 
-		aesni_gcm_dec[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				auth_tag,
 				(uint64_t)session->digest_length);
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
-		aesni_gcm_enc[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				src,
 				(uint64_t)data_length);
-		aesni_gcm_enc[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				sym_op->auth.digest.data,
 				(uint64_t)session->digest_length);
 	} else { /* AESNI_GMAC_OP_VERIFY */
@@ -363,12 +347,14 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 			return -1;
 		}
 
-		aesni_gcm_dec[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				src,
 				(uint64_t)data_length);
 
-		aesni_gcm_dec[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				auth_tag,
 				(uint64_t)session->digest_length);
 	}
@@ -468,7 +454,7 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 			break;
 		}
 
-		retval = process_gcm_crypto_op(ops[i], sess);
+		retval = process_gcm_crypto_op(qp, ops[i], sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			qp->qp_stats.dequeue_err_count++;
@@ -507,6 +493,7 @@ aesni_gcm_create(const char *name,
 {
 	struct rte_cryptodev *dev;
 	struct aesni_gcm_private *internals;
+	enum aesni_gcm_vector_mode vector_mode;
 
 	if (init_params->name[0] == '\0')
 		snprintf(init_params->name, sizeof(init_params->name),
@@ -518,6 +505,14 @@ aesni_gcm_create(const char *name,
 		return -EFAULT;
 	}
 
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_GCM_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_GCM_AVX;
+	else
+		vector_mode = RTE_AESNI_GCM_SSE;
+
 	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
 			sizeof(struct aesni_gcm_private), init_params->socket_id,
 			vdev);
@@ -538,8 +533,24 @@ aesni_gcm_create(const char *name,
 			RTE_CRYPTODEV_FF_CPU_AESNI |
 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
+	switch (vector_mode) {
+	case RTE_AESNI_GCM_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		break;
+	case RTE_AESNI_GCM_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		break;
+	case RTE_AESNI_GCM_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		break;
+	default:
+		break;
+	}
+
 	internals = dev->data->dev_private;
 
+	internals->vector_mode = vector_mode;
+
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 	internals->max_nb_sessions = init_params->max_nb_sessions;
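
Condensing the fragments above, the chained-mbuf walk in process_gcm_crypto_op() amounts to the following sketch (simplified and illustrative; error handling, in-place offsets and the GMAC paths are omitted):

    /* assumes rte_mbuf.h and aesni_gcm_pmd_private.h; each mbuf segment
     * feeds one update_enc() call, which is what the multi-buffer
     * library's new SGL-capable API makes possible */
    static void
    example_sgl_encrypt(struct aesni_gcm_qp *qp, struct aesni_gcm_session *s,
                    struct rte_mbuf *m_src, uint8_t *dst, const uint8_t *iv_ptr,
                    const uint8_t *aad, uint64_t aad_len,
                    uint32_t data_length, uint8_t *tag, uint64_t tag_len)
    {
            uint32_t part_len = RTE_MIN(m_src->data_len, data_length);
            uint32_t total_len;
            const uint8_t *src = rte_pktmbuf_mtod(m_src, uint8_t *);

            qp->ops[s->key].init(&s->gdata_key, &qp->gdata_ctx,
                            iv_ptr, aad, aad_len);
            qp->ops[s->key].update_enc(&s->gdata_key, &qp->gdata_ctx,
                            dst, src, part_len);
            dst += part_len;

            /* walk the remaining segments of the chain */
            for (total_len = data_length - part_len; total_len != 0;
                            total_len -= part_len) {
                    m_src = m_src->next;
                    src = rte_pktmbuf_mtod(m_src, uint8_t *);
                    part_len = RTE_MIN(m_src->data_len, total_len);
                    qp->ops[s->key].update_enc(&s->gdata_key, &qp->gdata_ctx,
                                    dst, src, part_len);
                    dst += part_len;
            }

            /* one finalize emits the tag for the whole chain */
            qp->ops[s->key].finalize(&s->gdata_key, &qp->gdata_ctx,
                            tag, tag_len);
    }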

drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c

@@ -49,7 +49,7 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 				.key_size = {
 					.min = 16,
 					.max = 32,
-					.increment = 16
+					.increment = 8
 				},
 				.digest_size = {
 					.min = 8,
@@ -75,7 +75,7 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 				.key_size = {
 					.min = 16,
 					.max = 32,
-					.increment = 16
+					.increment = 8
 				},
 				.digest_size = {
 					.min = 8,
@@ -233,6 +233,7 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		int socket_id)
 {
 	struct aesni_gcm_qp *qp = NULL;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
 
 	/* Free memory prior to re-allocation if needed. */
 	if (dev->data->queue_pairs[qp_id] != NULL)
@@ -250,6 +251,8 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
 		goto qp_setup_cleanup;
 
+	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+
 	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
 	if (qp->processed_pkts == NULL)
@@ -300,15 +303,18 @@ aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 
 /** Configure a aesni gcm session from a crypto xform chain */
 static void *
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform,	void *sess)
 {
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
 	if (unlikely(sess == NULL)) {
 		GCM_LOG_ERR("invalid session struct");
 		return NULL;
 	}
 
-	if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
+	if (aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+			sess, xform) != 0) {
 		GCM_LOG_ERR("failed configure session parameters");
 		return NULL;
 	}
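
The increment change from 16 to 8 is what publishes the new 24-byte (192-bit) key length to applications. A sketch of the range check an application could perform against this capability, assuming the cryptodev capability structs of this release (the helper name is hypothetical):

    #include <rte_cryptodev.h>

    /* returns non-zero if key_len falls on the advertised min/increment/max
     * grid; with min=16, max=32, increment=8 this accepts 16, 24 and 32 */
    static int
    aes_gcm_key_len_supported(uint8_t dev_id, uint16_t key_len)
    {
            const struct rte_cryptodev_sym_capability_idx idx = {
                    .type = RTE_CRYPTO_SYM_XFORM_AEAD,
                    .algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
            };
            const struct rte_cryptodev_symmetric_capability *cap =
                            rte_cryptodev_sym_capability_get(dev_id, &idx);

            if (cap == NULL)
                    return 0;

            const struct rte_crypto_param_range *r = &cap->aead.key_size;

            if (key_len < r->min || key_len > r->max)
                    return 0;
            if (r->increment == 0)
                    return key_len == r->min;
            return ((key_len - r->min) % r->increment) == 0;
    }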

drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h

@@ -58,6 +58,8 @@
 
 /** private data structure for each virtual AESNI GCM device */
 struct aesni_gcm_private {
+	enum aesni_gcm_vector_mode vector_mode;
+	/**< Vector mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
 	unsigned max_nb_sessions;
@@ -65,16 +67,20 @@ struct aesni_gcm_private {
 };
 
 struct aesni_gcm_qp {
+	const struct aesni_gcm_ops *ops;
+	/**< Architecture dependent function pointer table of the gcm APIs */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
+	/**< GCM parameters */
+	struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
+	/**< Queue pair statistics */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
 	uint16_t id;
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_LEN];
 	/**< Unique Queue Pair Name */
-	struct rte_ring *processed_pkts;
-	/**< Ring for placing process packets */
-	struct rte_mempool *sess_mp;
-	/**< Session Mempool */
-	struct rte_cryptodev_stats qp_stats;
-	/**< Queue pair statistics */
 } __rte_cache_aligned;
 
@@ -85,11 +91,6 @@ enum aesni_gcm_operation {
 	AESNI_GMAC_OP_VERIFY
 };
 
-enum aesni_gcm_key {
-	AESNI_GCM_KEY_128,
-	AESNI_GCM_KEY_256
-};
-
 /** AESNI GCM private session structure */
 struct aesni_gcm_session {
 	struct {
@@ -105,7 +106,7 @@ struct aesni_gcm_session {
 	/**< GCM operation type */
 	enum aesni_gcm_key key;
 	/**< GCM key type */
-	struct gcm_data gdata __rte_cache_aligned;
+	struct gcm_key_data gdata_key;
 	/**< GCM parameters */
 };
 
@@ -120,7 +121,8 @@ struct aesni_gcm_session {
  * - On failure returns error code < 0
  */
 extern int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+		struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform);

mk/rte.app.mk

@@ -147,7 +147,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore
 ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -lrte_pmd_aesni_mb
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)  += -lrte_pmd_aesni_gcm -lisal_crypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)  += -lrte_pmd_aesni_gcm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)  += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL)    += -lrte_pmd_openssl -lcrypto
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lrte_pmd_qat -lcrypto