cryptodev: change crypto symmetric vector structure

This patch updates the ``rte_crypto_sym_vec`` structure to add
support for both cpu_crypto synchronous operations and
asynchronous raw data-path APIs. The patch also includes
AESNI-MB and AESNI-GCM PMD changes, unit test changes and
documentation updates.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Author: Fan Zhang <roy.fan.zhang@intel.com>
Date: 2020-10-11 01:38:51 +01:00
Committed by: Akhil Goyal
Parent: a141f0c7e7
Commit: 8d928d47a2

9 files changed, 84 insertions(+), 54 deletions(-)
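At a glance, the change replaces the bare ``void **`` arrays in ``rte_crypto_sym_vec`` with arrays of ``struct rte_crypto_va_iova_ptr``, so the same descriptor can carry a virtual address for cpu_crypto and an IOVA for the raw data-path. A minimal sketch of the new calling convention for a single AEAD operation, modeled on the updated unit test below (the helper name and the setup variables are illustrative, not part of the patch):

    #include <rte_cryptodev.h>
    #include <rte_crypto_sym.h>

    /* Sketch: run one AEAD operation through the cpu_crypto path using the
     * new descriptor layout. 'sess', 'data', 'len', 'iv', 'aad' and 'digest'
     * are assumed to be prepared by the caller. */
    static inline int32_t
    one_aead_op(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
            void *data, uint32_t len, void *iv, void *aad, void *digest)
    {
        struct rte_crypto_vec seg = { .base = data, .len = len };
        struct rte_crypto_sgl sgl = { .vec = &seg, .num = 1 };
        /* for cpu_crypto only the .va fields are read; .iova may stay unset */
        struct rte_crypto_va_iova_ptr iv_ptr = { .va = iv };
        struct rte_crypto_va_iova_ptr aad_ptr = { .va = aad };
        struct rte_crypto_va_iova_ptr digest_ptr = { .va = digest };
        int32_t st = 0;
        struct rte_crypto_sym_vec symvec = {
            .num = 1,
            .sgl = &sgl,
            .iv = &iv_ptr,
            .digest = &digest_ptr,
            .aad = &aad_ptr,   /* union member, aliases .auth_iv */
            .status = &st,
        };
        union rte_crypto_sym_ofs ofs = { .raw = 0 };

        /* returns the number of successfully processed operations */
        if (rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs,
                &symvec) != 1)
            return st != 0 ? st : -1;
        return st;
    }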

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c

@@ -151,11 +151,11 @@ static void
 process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 {
     int32_t n, st;
-    void *iv;
     struct rte_crypto_sym_op *sop;
     union rte_crypto_sym_ofs ofs;
     struct rte_crypto_sgl sgl;
     struct rte_crypto_sym_vec symvec;
+    struct rte_crypto_va_iova_ptr iv_ptr, aad_ptr, digest_ptr;
     struct rte_crypto_vec vec[UINT8_MAX];

     sop = op->sym;
@@ -171,13 +171,17 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
     sgl.vec = vec;
     sgl.num = n;
     symvec.sgl = &sgl;
-    iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
-    symvec.iv = &iv;
-    symvec.aad = (void **)&sop->aead.aad.data;
-    symvec.digest = (void **)&sop->aead.digest.data;
+    symvec.iv = &iv_ptr;
+    symvec.digest = &digest_ptr;
+    symvec.aad = &aad_ptr;
     symvec.status = &st;
     symvec.num = 1;

+    /* for CPU crypto the IOVA address is not required */
+    iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+    digest_ptr.va = (void *)sop->aead.digest.data;
+    aad_ptr.va = (void *)sop->aead.aad.data;
+
     ofs.raw = 0;

     n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sop->session, ofs,
@@ -193,11 +197,11 @@ static void
 process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 {
     int32_t n, st;
-    void *iv;
     struct rte_crypto_sym_op *sop;
     union rte_crypto_sym_ofs ofs;
     struct rte_crypto_sgl sgl;
     struct rte_crypto_sym_vec symvec;
+    struct rte_crypto_va_iova_ptr iv_ptr, digest_ptr;
     struct rte_crypto_vec vec[UINT8_MAX];

     sop = op->sym;
@@ -213,13 +217,14 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
     sgl.vec = vec;
     sgl.num = n;
     symvec.sgl = &sgl;
-    iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
-    symvec.iv = &iv;
-    symvec.aad = (void **)&sop->aead.aad.data;
-    symvec.digest = (void **)&sop->auth.digest.data;
+    symvec.iv = &iv_ptr;
+    symvec.digest = &digest_ptr;
     symvec.status = &st;
     symvec.num = 1;

+    iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+    digest_ptr.va = (void *)sop->auth.digest.data;
+
     ofs.raw = 0;
     ofs.ofs.cipher.head = sop->cipher.data.offset - sop->auth.data.offset;
     ofs.ofs.cipher.tail = (sop->auth.data.offset + sop->auth.data.length) -
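The head/tail computation above encodes how far the cipher region is inset within the authenticated region. A worked example with illustrative numbers:

    /* Suppose auth covers offset 0, length 96, and cipher covers offset 16,
     * length 64 (illustrative values, not from the patch):
     *     ofs.ofs.cipher.head = 16 - 0 = 16
     *     ofs.ofs.cipher.tail = (0 + 96) - (16 + 64) = 16
     * i.e. 16 bytes at each end are authenticated but not encrypted. */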

diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst

@@ -620,7 +620,8 @@ operation descriptor (``struct rte_crypto_sym_vec``) containing:
 descriptors of performed operations (``struct rte_crypto_sgl``). Each instance
 of ``struct rte_crypto_sgl`` consists of a number of segments and a pointer to
 an array of segment descriptors ``struct rte_crypto_vec``;
-- pointers to arrays of size ``num`` containing IV, AAD and digest information,
+- pointers to arrays of size ``num`` containing IV, AAD and digest information
+  in the ``cpu_crypto`` sub-structure,
 - pointer to an array of size ``num`` where status information will be stored
   for each operation.
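For instance, once the descriptor has been processed, the caller walks the per-operation status array to pick out failures. A short sketch (assuming ``symvec`` was filled as in the test code above; ``drop_pkt()`` is a hypothetical error handler):

    n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &symvec);
    for (i = 0; i < symvec.num; i++)
        if (symvec.status[i] != 0)
            drop_pkt(i, symvec.status[i]); /* hypothetical handler */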

diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst

@@ -345,6 +345,9 @@ API Changes
 * vhost: Moved vDPA APIs from experimental to stable.

+* cryptodev: The structure ``rte_crypto_sym_vec`` is updated to support both
+  cpu_crypto synchronous operation and asynchronous raw data-path APIs.
+
 * scheduler: Renamed functions ``rte_cryptodev_scheduler_slave_attach``,
   ``rte_cryptodev_scheduler_slave_detach`` and
   ``rte_cryptodev_scheduler_slaves_get`` to

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c

@@ -535,9 +535,10 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
     processed = 0;
     for (i = 0; i < vec->num; ++i) {
         aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-            &vec->sgl[i], vec->iv[i], vec->aad[i]);
+            &vec->sgl[i], vec->iv[i].va,
+            vec->aad[i].va);
         vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-            gdata_ctx, vec->digest[i]);
+            gdata_ctx, vec->digest[i].va);
         processed += (vec->status[i] == 0);
     }
@@ -553,9 +554,10 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
     processed = 0;
     for (i = 0; i < vec->num; ++i) {
         aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-            &vec->sgl[i], vec->iv[i], vec->aad[i]);
+            &vec->sgl[i], vec->iv[i].va,
+            vec->aad[i].va);
         vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-            gdata_ctx, vec->digest[i]);
+            gdata_ctx, vec->digest[i].va);
         processed += (vec->status[i] == 0);
     }
@@ -576,9 +578,9 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
         }

         aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-            &vec->sgl[i], vec->iv[i]);
+            &vec->sgl[i], vec->iv[i].va);
         vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-            gdata_ctx, vec->digest[i]);
+            gdata_ctx, vec->digest[i].va);
         processed += (vec->status[i] == 0);
     }
@@ -599,9 +601,9 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
         }

         aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-            &vec->sgl[i], vec->iv[i]);
+            &vec->sgl[i], vec->iv[i].va);
         vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-            gdata_ctx, vec->digest[i]);
+            gdata_ctx, vec->digest[i].va);
         processed += (vec->status[i] == 0);
     }

diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c

@@ -1931,7 +1931,7 @@ generate_sync_dgst(struct rte_crypto_sym_vec *vec,
     for (i = 0, k = 0; i != vec->num; i++) {
         if (vec->status[i] == 0) {
-            memcpy(vec->digest[i], dgst[i], len);
+            memcpy(vec->digest[i].va, dgst[i], len);
             k++;
         }
     }
@@ -1947,7 +1947,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
     for (i = 0, k = 0; i != vec->num; i++) {
         if (vec->status[i] == 0) {
-            if (memcmp(vec->digest[i], dgst[i], len) != 0)
+            if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
                 vec->status[i] = EBADMSG;
             else
                 k++;
@@ -2010,9 +2010,8 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
         }

         /* Submit job for processing */
-        set_cpu_mb_job_params(job, s, sofs, buf, len,
-            vec->iv[i], vec->aad[i], tmp_dgst[i],
-            &vec->status[i]);
+        set_cpu_mb_job_params(job, s, sofs, buf, len, vec->iv[i].va,
+            vec->aad[i].va, tmp_dgst[i], &vec->status[i]);
         job = submit_sync_job(mb_mgr);
         j++;

diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h

@@ -51,26 +51,44 @@ struct rte_crypto_sgl {
 };

 /**
- * Synchronous operation descriptor.
- * Supposed to be used with CPU crypto API call.
+ * Crypto virtual and IOVA address descriptor, used to describe cryptographic
+ * data buffer without the length information. The length information is
+ * normally predefined during session creation.
+ */
+struct rte_crypto_va_iova_ptr {
+    void *va;
+    rte_iova_t iova;
+};
+
+/**
+ * Raw data operation descriptor.
+ * Supposed to be used with synchronous CPU crypto API call or asynchronous
+ * RAW data path API call.
  */
 struct rte_crypto_sym_vec {
-    /** array of SGL vectors */
-    struct rte_crypto_sgl *sgl;
-    /** array of pointers to IV */
-    void **iv;
-    /** array of pointers to AAD */
-    void **aad;
-    /** array of pointers to digest */
-    void **digest;
-    /**
-     * array of statuses for each operation:
-     *  - 0 on success
-     *  - errno on error
-     */
-    int32_t *status;
     /** number of operations to perform */
     uint32_t num;
+    /** array of SGL vectors */
+    struct rte_crypto_sgl *sgl;
+    /** array of pointers to cipher IV */
+    struct rte_crypto_va_iova_ptr *iv;
+    /** array of pointers to digest */
+    struct rte_crypto_va_iova_ptr *digest;
+
+    __extension__
+    union {
+        /** array of pointers to auth IV, used for chain operation */
+        struct rte_crypto_va_iova_ptr *auth_iv;
+        /** array of pointers to AAD, used for AEAD operation */
+        struct rte_crypto_va_iova_ptr *aad;
+    };
+
+    /**
+     * array of statuses for each operation:
+     *  - 0 on success
+     *  - errno on error
+     */
+    int32_t *status;
 };

 /**
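The anonymous union is the subtle part of the new layout: an AEAD request supplies per-operation AAD pointers, while a cipher/auth chain supplies auth-IV pointers, and both occupy the same storage. A minimal sketch of the distinction (hypothetical helper, not part of the patch):

    /* Sketch: both branches write the same union slot; which one is
     * meaningful depends on the session's transform type. */
    static inline void
    sym_vec_set_extra(struct rte_crypto_sym_vec *vec,
            struct rte_crypto_va_iova_ptr *arr, int is_aead)
    {
        if (is_aead)
            vec->aad = arr;      /* AEAD: array of AAD descriptors */
        else
            vec->auth_iv = arr;  /* chain: array of auth IV descriptors */
    }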

diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c

@@ -693,9 +693,9 @@ cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
     struct rte_ipsec_sa *sa;
     struct replay_sqn *rsn;
     union sym_op_data icv;
-    void *iv[num];
-    void *aad[num];
-    void *dgst[num];
+    struct rte_crypto_va_iova_ptr iv[num];
+    struct rte_crypto_va_iova_ptr aad[num];
+    struct rte_crypto_va_iova_ptr dgst[num];
     uint32_t dr[num];
     uint32_t l4ofs[num];
     uint32_t clen[num];
@@ -720,9 +720,9 @@ cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
                 l4ofs + k, rc, ivbuf[k]);

             /* fill iv, digest and aad */
-            iv[k] = ivbuf[k];
-            aad[k] = icv.va + sa->icv_len;
-            dgst[k++] = icv.va;
+            iv[k].va = ivbuf[k];
+            aad[k].va = icv.va + sa->icv_len;
+            dgst[k++].va = icv.va;
         } else {
             dr[i - k] = i;
             rte_errno = -rc;

diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c

@@ -449,9 +449,9 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
     uint32_t i, k, n;
     uint32_t l2, l3;
     union sym_op_data icv;
-    void *iv[num];
-    void *aad[num];
-    void *dgst[num];
+    struct rte_crypto_va_iova_ptr iv[num];
+    struct rte_crypto_va_iova_ptr aad[num];
+    struct rte_crypto_va_iova_ptr dgst[num];
     uint32_t dr[num];
     uint32_t l4ofs[num];
     uint32_t clen[num];
@@ -488,9 +488,9 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
                 ivbuf[k]);

             /* fill iv, digest and aad */
-            iv[k] = ivbuf[k];
-            aad[k] = icv.va + sa->icv_len;
-            dgst[k++] = icv.va;
+            iv[k].va = ivbuf[k];
+            aad[k].va = icv.va + sa->icv_len;
+            dgst[k++].va = icv.va;
         } else {
             dr[i - k] = i;
             rte_errno = -rc;

diff --git a/lib/librte_ipsec/misc.h b/lib/librte_ipsec/misc.h

@@ -112,7 +112,9 @@ mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs,
 static inline void
 cpu_crypto_bulk(const struct rte_ipsec_session *ss,
     union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
-    void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
+    struct rte_crypto_va_iova_ptr iv[],
+    struct rte_crypto_va_iova_ptr aad[],
+    struct rte_crypto_va_iova_ptr dgst[], uint32_t l4ofs[],
     uint32_t clen[], uint32_t num)
 {
     uint32_t i, j, n;
@@ -136,8 +138,8 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
         /* fill the request structure */
         symvec.sgl = &vecpkt[j];
         symvec.iv = &iv[j];
-        symvec.aad = &aad[j];
         symvec.digest = &dgst[j];
+        symvec.aad = &aad[j];
         symvec.status = &st[j];
         symvec.num = i - j;