crypto/aesni_mb: support CPU crypto

Add support for CPU crypto mode by introducing the required handler.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Commit a2c6d3f34f (parent 6738c0a956)
Authored by Konstantin Ananyev on 2020-04-17 16:03:48 +01:00; committed by Akhil Goyal
6 changed files with 364 additions and 34 deletions


@@ -12,6 +12,9 @@ support for utilizing Intel multi buffer library, see the white paper
The AES-NI MB PMD has currently only been tested on Fedora 21 64-bit with gcc.

The AES-NI MB PMD supports synchronous mode of operation with the
``rte_cryptodev_sym_cpu_crypto_process`` function call.
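
A minimal usage sketch of the synchronous call (illustrative only, not part
of this patch): it assumes ``dev_id`` and ``sess`` have already been set up
and that the data sits in a single contiguous buffer.

.. code-block:: c

    #include <rte_cryptodev.h>

    /* one flat segment; the iova field is not used for CPU crypto */
    struct rte_crypto_vec seg = { .base = buf, .len = buf_len };
    struct rte_crypto_sgl sgl = { .vec = &seg, .num = 1 };

    void *iv_ptr = iv, *aad_ptr = aad, *digest_ptr = digest;
    int32_t status = 0;

    struct rte_crypto_sym_vec vec = {
        .sgl = &sgl,
        .iv = &iv_ptr,
        .aad = &aad_ptr,
        .digest = &digest_ptr,
        .status = &status,
        .num = 1,
    };

    /* zero offsets: cipher and auth both cover the whole buffer */
    union rte_crypto_sym_ofs ofs = { .raw = 0 };

    /* returns the number of successfully processed elements */
    uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
            ofs, &vec);
    /* on success: done == 1 and status == 0 */
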
Features
--------


@@ -12,6 +12,7 @@ CPU AVX2 = Y
CPU AVX512 = Y
CPU AESNI = Y
OOP LB In LB Out = Y
CPU crypto = Y
;
; Supported crypto algorithms of the 'aesni_mb' crypto driver.


@@ -81,6 +81,7 @@ New Features
* **Updated the AESNI MB crypto PMD.**

  * Updated the AESNI MB PMD with AES-256 DOCSIS algorithm.
  * Added support for synchronous Crypto burst API.

* **Added handling of mixed crypto algorithms in QAT PMD for GEN2.**


@@ -275,6 +275,9 @@ aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
extern uint32_t
aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec);
#endif /* _AESNI_MB_PMD_PRIVATE_H_ */


@@ -11,6 +11,7 @@
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_per_lcore.h>
#include "aesni_mb_pmd_private.h"
@@ -21,6 +22,12 @@ int aesni_mb_logtype_driver;
#define HMAC_MAX_BLOCK_SIZE 128
static uint8_t cryptodev_driver_id;
/*
* Needed to support CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
* as we still use the JOB-based API even for synchronous processing.
*/
static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);
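/*
 * Each lcore lazily allocates its own manager on first use in
 * aesni_mb_cpu_crypto_process_bulk() below; the per-lcore copy is
 * released on device removal in cryptodev_aesni_mb_remove().
 */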
typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
@@ -810,6 +817,119 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
(UINT64_MAX - u_src + u_dst + 1);
}
static inline void
set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
void *iv, void *aad, void *digest, void *udata)
{
/* Set crypto operation */
job->chain_order = session->chain_order;
/* Set cipher parameters */
job->cipher_direction = session->cipher.direction;
job->cipher_mode = session->cipher.mode;
job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
/* Set authentication parameters */
job->hash_alg = session->auth.algo;
job->iv = iv;
switch (job->hash_alg) {
case AES_XCBC:
job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
job->u.XCBC._k2 = session->auth.xcbc.k2;
job->u.XCBC._k3 = session->auth.xcbc.k3;
job->aes_enc_key_expanded =
session->cipher.expanded_aes_keys.encode;
job->aes_dec_key_expanded =
session->cipher.expanded_aes_keys.decode;
break;
case AES_CCM:
job->u.CCM.aad = (uint8_t *)aad + 18;
job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
job->aes_enc_key_expanded =
session->cipher.expanded_aes_keys.encode;
job->aes_dec_key_expanded =
session->cipher.expanded_aes_keys.decode;
job->iv++;
break;
case AES_CMAC:
job->u.CMAC._key_expanded = session->auth.cmac.expkey;
job->u.CMAC._skey1 = session->auth.cmac.skey1;
job->u.CMAC._skey2 = session->auth.cmac.skey2;
job->aes_enc_key_expanded =
session->cipher.expanded_aes_keys.encode;
job->aes_dec_key_expanded =
session->cipher.expanded_aes_keys.decode;
break;
case AES_GMAC:
if (session->cipher.mode == GCM) {
job->u.GCM.aad = aad;
job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
} else {
/* For GMAC */
job->u.GCM.aad = buf;
job->u.GCM.aad_len_in_bytes = len;
job->cipher_mode = GCM;
}
job->aes_enc_key_expanded = &session->cipher.gcm_key;
job->aes_dec_key_expanded = &session->cipher.gcm_key;
break;
default:
job->u.HMAC._hashed_auth_key_xor_ipad =
session->auth.pads.inner;
job->u.HMAC._hashed_auth_key_xor_opad =
session->auth.pads.outer;
if (job->cipher_mode == DES3) {
job->aes_enc_key_expanded =
session->cipher.exp_3des_keys.ks_ptr;
job->aes_dec_key_expanded =
session->cipher.exp_3des_keys.ks_ptr;
} else {
job->aes_enc_key_expanded =
session->cipher.expanded_aes_keys.encode;
job->aes_dec_key_expanded =
session->cipher.expanded_aes_keys.decode;
}
}
/*
* The multi-buffer library currently only supports returning a truncated
* digest length, as specified in the relevant IPsec RFCs.
*/
/* Set digest location and length */
job->auth_tag_output = digest;
job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
/* Set IV parameters */
job->iv_len_in_bytes = session->iv.length;
/* Data Parameters */
job->src = buf;
job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
job->msg_len_to_hash_in_bytes = 0;
job->msg_len_to_cipher_in_bytes = 0;
} else {
job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
sofs.ofs.auth.tail;
job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
sofs.ofs.cipher.tail;
}
job->user_data = udata;
}
/**
* Process a crypto operation and complete a JOB_AES_HMAC job structure for
* submission to the multi buffer library for processing.
@@ -1102,6 +1222,15 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
return op;
}
static inline void
post_process_mb_sync_job(JOB_AES_HMAC *job)
{
uint32_t *st;
st = job->user_data;
st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
}
/**
* Process a completed JOB_AES_HMAC job and keep processing jobs until
* get_completed_job returns NULL
@@ -1138,6 +1267,26 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
return processed_jobs;
}
static inline uint32_t
handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
{
uint32_t i;
for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
post_process_mb_sync_job(job);
return i;
}
static inline uint32_t
flush_mb_sync_mgr(MB_MGR *mb_mgr)
{
JOB_AES_HMAC *job;
job = IMB_FLUSH_JOB(mb_mgr);
return handle_completed_sync_jobs(job, mb_mgr);
}
static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
@@ -1241,8 +1390,201 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return processed_jobs;
}
static MB_MGR *
alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
{
MB_MGR *mb_mgr = alloc_mb_mgr(0);
if (mb_mgr == NULL)
return NULL;
switch (vector_mode) {
case RTE_AESNI_MB_SSE:
init_mb_mgr_sse(mb_mgr);
break;
case RTE_AESNI_MB_AVX:
init_mb_mgr_avx(mb_mgr);
break;
case RTE_AESNI_MB_AVX2:
init_mb_mgr_avx2(mb_mgr);
break;
case RTE_AESNI_MB_AVX512:
init_mb_mgr_avx512(mb_mgr);
break;
default:
AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
free_mb_mgr(mb_mgr);
return NULL;
}
return mb_mgr;
}
static inline void
aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
{
uint32_t i;
for (i = 0; i != vec->num; ++i)
vec->status[i] = err;
}
static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
/* no multi-seg support with current AESNI-MB PMD */
if (sgl->num != 1)
return ENOTSUP;
else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
return EINVAL;
return 0;
}
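/*
 * For the single supported segment the offsets partition the buffer as:
 *
 *   |<- cipher.head ->|<---- data to cipher ---->|<- cipher.tail ->|
 *   |<------------------------ vec[0].len ------------------------>|
 *
 * hence the check that head + tail must not exceed the segment length;
 * set_cpu_mb_job_params() later derives the cipher/hash lengths as
 * len - head - tail.
 */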
static inline JOB_AES_HMAC *
submit_sync_job(MB_MGR *mb_mgr)
{
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
return IMB_SUBMIT_JOB(mb_mgr);
#else
return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
}
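/*
 * Per the intel-ipsec-mb API, IMB_SUBMIT_JOB() validates the job
 * parameters before queueing it, while IMB_SUBMIT_JOB_NOCHECK() skips
 * that validation; release builds can use the NOCHECK variant because
 * the session parameters were already validated at session setup.
 */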
static inline uint32_t
generate_sync_dgst(struct rte_crypto_sym_vec *vec,
const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
uint32_t i, k;
for (i = 0, k = 0; i != vec->num; i++) {
if (vec->status[i] == 0) {
memcpy(vec->digest[i], dgst[i], len);
k++;
}
}
return k;
}
static inline uint32_t
verify_sync_dgst(struct rte_crypto_sym_vec *vec,
const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
uint32_t i, k;
for (i = 0, k = 0; i != vec->num; i++) {
if (vec->status[i] == 0) {
if (memcmp(vec->digest[i], dgst[i], len) != 0)
vec->status[i] = EBADMSG;
else
k++;
}
}
return k;
}
uint32_t
aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
{
int32_t ret;
uint32_t i, j, k, len;
void *buf;
JOB_AES_HMAC *job;
MB_MGR *mb_mgr;
struct aesni_mb_private *priv;
struct aesni_mb_session *s;
uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
s = get_sym_session_private_data(sess, dev->driver_id);
if (s == NULL) {
aesni_mb_fill_error_code(vec, EINVAL);
return 0;
}
/* get per-thread MB MGR, create one if needed */
mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
if (mb_mgr == NULL) {
priv = dev->data->dev_private;
mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
if (mb_mgr == NULL) {
aesni_mb_fill_error_code(vec, ENOMEM);
return 0;
}
RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
}
for (i = 0, j = 0, k = 0; i != vec->num; i++) {
ret = check_crypto_sgl(sofs, vec->sgl + i);
if (ret != 0) {
vec->status[i] = ret;
continue;
}
buf = vec->sgl[i].vec[0].base;
len = vec->sgl[i].vec[0].len;
job = IMB_GET_NEXT_JOB(mb_mgr);
if (job == NULL) {
k += flush_mb_sync_mgr(mb_mgr);
job = IMB_GET_NEXT_JOB(mb_mgr);
RTE_ASSERT(job != NULL);
}
/* Submit job for processing */
set_cpu_mb_job_params(job, s, sofs, buf, len,
vec->iv[i], vec->aad[i], tmp_dgst[i],
&vec->status[i]);
job = submit_sync_job(mb_mgr);
j++;
/* handle completed jobs */
k += handle_completed_sync_jobs(job, mb_mgr);
}
/* flush remaining jobs */
while (k != j)
k += flush_mb_sync_mgr(mb_mgr);
/* finish processing for successful jobs: check/update digest */
if (k != 0) {
if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
k = verify_sync_dgst(vec,
(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
s->auth.req_digest_len);
else
k = generate_sync_dgst(vec,
(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
s->auth.req_digest_len);
}
return k;
}
static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
static uint64_t
vec_mode_to_flags(enum aesni_mb_vector_mode mode)
{
switch (mode) {
case RTE_AESNI_MB_SSE:
return RTE_CRYPTODEV_FF_CPU_SSE;
case RTE_AESNI_MB_AVX:
return RTE_CRYPTODEV_FF_CPU_AVX;
case RTE_AESNI_MB_AVX2:
return RTE_CRYPTODEV_FF_CPU_AVX2;
case RTE_AESNI_MB_AVX512:
return RTE_CRYPTODEV_FF_CPU_AVX512;
default:
AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
return 0;
}
}
static int
cryptodev_aesni_mb_create(const char *name,
struct rte_vdev_device *vdev,
@@ -1278,7 +1620,8 @@ cryptodev_aesni_mb_create(const char *name,
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO;
/* Check CPU for support for AES instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
@@ -1286,30 +1629,12 @@ cryptodev_aesni_mb_create(const char *name,
else
AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");
-	mb_mgr = alloc_mb_mgr(0);
-	if (mb_mgr == NULL)
-		return -ENOMEM;
+	dev->feature_flags |= vec_mode_to_flags(vector_mode);
-	switch (vector_mode) {
-	case RTE_AESNI_MB_SSE:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX2:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		init_mb_mgr_avx2(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
-		init_mb_mgr_avx512(mb_mgr);
-		break;
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
-		goto error_exit;
+	mb_mgr = alloc_init_mb_mgr(vector_mode);
+	if (mb_mgr == NULL) {
+		rte_cryptodev_pmd_destroy(dev);
+		return -ENOMEM;
 	}
/* Set vector instructions mode supported */
@@ -1321,16 +1646,7 @@ cryptodev_aesni_mb_create(const char *name,
AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
imb_get_version_str());
 	return 0;
-
-error_exit:
-	if (mb_mgr)
-		free_mb_mgr(mb_mgr);
-
-	rte_cryptodev_pmd_destroy(dev);
-
-	return -1;
 }
static int
@@ -1379,6 +1695,10 @@ cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
internals = cryptodev->data->dev_private;
free_mb_mgr(internals->mb_mgr);
if (RTE_PER_LCORE(sync_mb_mgr)) {
free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
RTE_PER_LCORE(sync_mb_mgr) = NULL;
}
return rte_cryptodev_pmd_destroy(cryptodev);
}


@@ -802,6 +802,8 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = {
.queue_pair_setup = aesni_mb_pmd_qp_setup,
.queue_pair_release = aesni_mb_pmd_qp_release,
.sym_cpu_process = aesni_mb_cpu_crypto_process_bulk,
.sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
.sym_session_configure = aesni_mb_pmd_sym_session_configure,
.sym_session_clear = aesni_mb_pmd_sym_session_clear