cryptodev: introduce CPU crypto API

Add new API allowing to process crypto operations in a synchronous
manner. Operations are performed on a set of SG arrays.

Cryptodevs which allow CPU crypto operation mode have to set
the RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO capability flag.

Add a helper method to easily convert mbufs to a SGL form.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Signed-off-by: Marcin Smoczynski <marcinx.smoczynski@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
This commit is contained in:
Marcin Smoczynski 2020-02-04 14:12:51 +01:00 committed by Akhil Goyal
parent d5a9ea551f
commit 7adf992fb9
8 changed files with 244 additions and 5 deletions

View File

@ -27,6 +27,7 @@ RSA PRIV OP KEY EXP =
RSA PRIV OP KEY QT =
Digest encrypted =
Asymmetric sessionless =
CPU crypto =
;
; Supported crypto algorithms of a default crypto driver.

View File

@ -1,5 +1,5 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2016-2017 Intel Corporation.
Copyright(c) 2016-2020 Intel Corporation.
Cryptography Device Library
===========================
@ -600,6 +600,37 @@ chain.
};
};
Synchronous mode
----------------
Some cryptodevs support a synchronous mode alongside the standard asynchronous
mode. In that case operations are performed directly when calling
``rte_cryptodev_sym_cpu_crypto_process``, instead of enqueuing and later
dequeuing an operation. This mode of operation allows cryptodevs which utilize
CPU cryptographic acceleration to achieve a significant performance boost
compared to the standard asynchronous approach. Cryptodevs supporting
synchronous mode have the ``RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO`` feature flag set.
To perform a synchronous operation a call to
``rte_cryptodev_sym_cpu_crypto_process`` has to be made with vectorized
operation descriptor (``struct rte_crypto_sym_vec``) containing:
- ``num`` - number of operations to perform,
- pointer to an array of size ``num`` containing a scatter-gather list
descriptors of performed operations (``struct rte_crypto_sgl``). Each instance
of ``struct rte_crypto_sgl`` consists of a number of segments and a pointer to
an array of segment descriptors ``struct rte_crypto_vec``;
- pointers to arrays of size ``num`` containing IV, AAD and digest information,
- pointer to an array of size ``num`` where status information will be stored
for each operation.
The function returns the number of successfully completed operations and sets
the appropriate status for each operation in the status array provided as
a call argument. A status different from zero must be treated as an error.
For more details, e.g. how to convert an mbuf to an SGL, please refer to an
example usage in the IPsec library implementation.
Sample code
-----------

View File

@ -130,6 +130,12 @@ New Features
* ECPM (Elliptic Curve Point Multiplication) is added to
asymmetric crypto library specifications.
* **Added synchronous Crypto burst API.**
A new API is introduced in the crypto library to handle synchronous
cryptographic operations, allowing applications to achieve a performance gain
on cryptodevs which use CPU based acceleration, such as Intel AES-NI.
* **Added handling of mixed algorithms in encrypted digest requests in QAT PMD.**
Added handling of mixed algorithms in encrypted digest hash-cipher

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2016-2019 Intel Corporation
* Copyright(c) 2016-2020 Intel Corporation
*/
#ifndef _RTE_CRYPTO_SYM_H_
@ -25,6 +25,67 @@ extern "C" {
#include <rte_mempool.h>
#include <rte_common.h>
/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Supposed to be used to pass input/output data buffers for crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
};
/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};
/**
 * Synchronous operation descriptor.
 * Supposed to be used with CPU crypto API call
 * (rte_cryptodev_sym_cpu_crypto_process).
 * All arrays below have *num* entries; entry *i* of each array belongs to
 * the i-th operation of the burst.
 */
struct rte_crypto_sym_vec {
	/** array of SGL vectors */
	struct rte_crypto_sgl *sgl;
	/** array of pointers to IV, one entry per operation */
	void **iv;
	/** array of pointers to AAD, one entry per operation */
	void **aad;
	/** array of pointers to digest, one entry per operation */
	void **digest;
	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
	/** number of operations to perform */
	uint32_t num;
};
/**
 * Used with rte_cryptodev_sym_cpu_crypto_process() to specify head/tail
 * offsets for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	/** all four offsets as a single 64-bit value, for easy copy/compare */
	uint64_t raw;
	struct {
		struct {
			/** head (start) offset for the operation */
			uint16_t head;
			/** tail (end) offset for the operation */
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
/** Symmetric Cipher Algorithms */
enum rte_crypto_cipher_algorithm {
@ -789,6 +850,73 @@ __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
return 0;
}
/**
 * Convert a portion of mbuf data into a vector representation.
 * Each mbuf segment is described by a separate entry in the *vec* array.
 * Expects that *ofs* + *len* does not exceed the mbuf's *pkt_len*.
 *
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Array of rte_crypto_vec entries to fill in.
 * @param num
 *   Number of entries available in the *vec* array.
 * @return
 *   - number of successfully filled entries in the *vec* array.
 *   - negative number of elements in *vec* array required.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	const struct rte_mbuf *seg;
	uint32_t cnt, rem, sz;

	/* the requested region must begin inside the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	/* not enough entries: report (negated) how many would be needed */
	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);

	sz = mb->data_len - ofs;

	/* fast path: the whole region fits into the first segment */
	if (len <= sz) {
		vec[0].len = len;
		return 1;
	}

	/* slow path: region spreads across segments */
	vec[0].len = sz;
	rem = len - sz;

	cnt = 1;
	for (seg = mb->next; seg != NULL; seg = seg->next) {
		vec[cnt].base = rte_pktmbuf_mtod(seg, void *);
		vec[cnt].iova = rte_pktmbuf_iova(seg);

		sz = seg->data_len;
		if (rem <= sz) {
			/* last (possibly partial) segment of the region */
			vec[cnt].len = rem;
			rem = 0;
			break;
		}

		/* consume the whole segment and keep going */
		vec[cnt].len = sz;
		rem -= sz;
		cnt++;
	}

	RTE_ASSERT(rem == 0);
	return cnt + 1;
}
#ifdef __cplusplus
}

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2015-2017 Intel Corporation
* Copyright(c) 2015-2020 Intel Corporation
*/
#include <sys/types.h>
@ -493,6 +493,8 @@ rte_cryptodev_get_feature_name(uint64_t flag)
return "RSA_PRIV_OP_KEY_QT";
case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
return "DIGEST_ENCRYPTED";
case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
return "SYM_CPU_CRYPTO";
default:
return NULL;
}
@ -1618,6 +1620,37 @@ rte_cryptodev_sym_session_get_user_data(
return (void *)(sess->sess_data + sess->nb_drivers);
}
/* Set the same error code in the status entry of every operation in *vec*. */
static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t idx;

	for (idx = 0; idx != vec->num; idx++)
		vec->status[idx] = errnum;
}
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	/* unknown device: flag every operation with EINVAL, process none */
	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	/* the PMD must both advertise the feature and implement the op */
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO) ||
			dev->dev_ops->sym_cpu_process == NULL) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2015-2017 Intel Corporation.
* Copyright(c) 2015-2020 Intel Corporation.
*/
#ifndef _RTE_CRYPTODEV_H_
@ -450,6 +450,8 @@ rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
/**< Support encrypted-digest operations where digest is appended to data */
#define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20)
/**< Support asymmetric session-less operations */
#define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21)
/**< Support symmetric cpu-crypto processing */
/**
@ -1274,6 +1276,24 @@ void *
rte_cryptodev_sym_session_get_user_data(
struct rte_cryptodev_sym_session *sess);
/**
 * Perform actual crypto processing (encrypt/digest or auth/decrypt)
 * on user provided data.
 *
 * @param dev_id The device identifier.
 * @param sess Cryptodev session structure
 * @param ofs Start and stop offsets for auth and cipher operations
 * @param vec Vectorized operation descriptor
 *
 * @return
 *  - Returns number of successfully processed packets.
 *
 * On an invalid device id or a device without CPU crypto support, 0 is
 * returned and each entry of vec->status is set to EINVAL or ENOTSUP
 * respectively.
 */
__rte_experimental
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec);
#ifdef __cplusplus
}
#endif

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2015-2016 Intel Corporation.
* Copyright(c) 2015-2020 Intel Corporation.
*/
#ifndef _RTE_CRYPTODEV_PMD_H_
@ -308,6 +308,23 @@ typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
*/
typedef void (*cryptodev_asym_free_session_t)(struct rte_cryptodev *dev,
struct rte_cryptodev_asym_session *sess);
/**
 * Perform actual crypto processing (encrypt/digest or auth/decrypt)
 * on user provided data.
 *
 * PMD hook invoked by rte_cryptodev_sym_cpu_crypto_process() once the
 * library has validated the device and its feature flags.
 *
 * @param dev Crypto device pointer
 * @param sess Cryptodev session structure
 * @param ofs Start and stop offsets for auth and cipher operations
 * @param vec Vectorized operation descriptor
 *
 * @return
 *  - Returns number of successfully processed packets.
 *
 */
typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)
	(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
	union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
/** Crypto device operations function pointer table */
struct rte_cryptodev_ops {
@ -342,6 +359,8 @@ struct rte_cryptodev_ops {
/**< Clear a Crypto sessions private data. */
cryptodev_asym_free_session_t asym_session_clear;
/**< Clear a Crypto sessions private data. */
cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
/**< process input data synchronously (cpu-crypto). */
};

View File

@ -71,6 +71,7 @@ EXPERIMENTAL {
rte_cryptodev_asym_session_init;
rte_cryptodev_asym_xform_capability_check_modlen;
rte_cryptodev_asym_xform_capability_check_optype;
rte_cryptodev_sym_cpu_crypto_process;
rte_cryptodev_sym_get_existing_header_session_size;
rte_cryptodev_sym_session_get_user_data;
rte_cryptodev_sym_session_pool_create;