cryptodev: remove digest length from crypto op

Digest length was duplicated in the authentication transform
and the crypto operation structures.

Since the digest length is not expected to change within the same
session, it is removed from the crypto operation.

Also, the digest length field has been shrunk to 16 bits,
which should be sufficient for any digest.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>
Author: Pablo de Lara
Date:   2017-07-02 06:41:18 +01:00
Parent: 12a4aaf1df
Commit: 7f0034275a
22 changed files with 118 additions and 145 deletions
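
To illustrate the resulting API, here is a minimal sketch (the helper
functions below are hypothetical; only the DPDK structures and accessors
touched by this patch are assumed): the digest length is now configured
once in the authentication transform at session creation, and each crypto
operation carries only the digest location, not its length.

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>

#define SHA1_HMAC_DIGEST_LEN 20 /* example digest size for SHA1-HMAC */

/* Session-time setup: digest length lives only in the auth xform (uint16_t). */
static void
example_fill_auth_xform(struct rte_crypto_sym_xform *xform,
			uint8_t *key, uint16_t key_len)
{
	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->next = NULL;
	xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform->auth.key.data = key;
	xform->auth.key.length = key_len;
	xform->auth.digest_length = SHA1_HMAC_DIGEST_LEN;
}

/* Per-operation setup: only the digest pointer and physical address are set. */
static void
example_fill_auth_op(struct rte_crypto_op *op, struct rte_mbuf *m,
		     uint32_t data_len)
{
	struct rte_crypto_sym_op *sym_op = op->sym;

	sym_op->m_src = m;
	sym_op->auth.data.offset = 0;
	sym_op->auth.data.length = data_len;
	/* Digest is placed right after the data; no per-op length field. */
	sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
							   data_len);
	sym_op->auth.digest.phys_addr =
			rte_pktmbuf_mtophys_offset(m, data_len);
}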

View File

@ -161,7 +161,6 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@ -183,7 +182,6 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
sym_op->auth.digest.length = options->auth_digest_sz;
sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
sym_op->auth.aad.data = test_vector->aad.data;
@ -246,7 +244,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@ -268,7 +265,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
sym_op->auth.digest.length = options->auth_digest_sz;
sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
sym_op->auth.aad.data = test_vector->aad.data;
}
@ -337,7 +333,6 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = sym_op->cipher.data.length +
@ -360,8 +355,6 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
sym_op->auth.digest.length = options->auth_digest_sz;
}
sym_op->auth.data.length = options->test_buffer_size;

View File

@ -547,7 +547,6 @@ chain.
struct {
uint8_t *data;
phys_addr_t phys_addr;
uint16_t length;
} digest; /**< Digest parameters */
struct {

View File

@ -94,6 +94,7 @@ New Features
of the crypto operation.
* Moved length and offset of cipher IV to ``rte_crypto_cipher_xform``.
* Removed Additional Authentication Data (AAD) length.
* Removed digest length.
* **Reorganized the crypto operation structure.**
@ -201,6 +202,7 @@ ABI Changes
* Added authentication IV length and offset parameters.
* Changed field size of AAD length from uint32_t to uint16_t.
* Changed field size of digest length from uint32_t to uint16_t.
Shared Library Versions

View File

@ -78,6 +78,7 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
{
const struct rte_crypto_sym_xform *auth_xform;
const struct rte_crypto_sym_xform *cipher_xform;
uint16_t digest_length;
if (xform->next == NULL || xform->next->next != NULL) {
GCM_LOG_ERR("Two and only two chained xform required");
@ -128,6 +129,8 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
return -EINVAL;
}
digest_length = auth_xform->auth.digest_length;
/* Check key length and calculate GCM pre-compute. */
switch (cipher_xform->cipher.key.length) {
case 16:
@ -146,6 +149,14 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
}
sess->aad_length = auth_xform->auth.add_auth_data_length;
/* Digest check */
if (digest_length != 16 &&
digest_length != 12 &&
digest_length != 8) {
GCM_LOG_ERR("digest");
return -EINVAL;
}
sess->digest_length = digest_length;
return 0;
}
@ -245,13 +256,6 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
*iv_padd = rte_bswap32(1);
}
if (sym_op->auth.digest.length != 16 &&
sym_op->auth.digest.length != 12 &&
sym_op->auth.digest.length != 8) {
GCM_LOG_ERR("digest");
return -1;
}
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
aesni_gcm_enc[session->key].init(&session->gdata,
@ -281,11 +285,11 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
aesni_gcm_enc[session->key].finalize(&session->gdata,
sym_op->auth.digest.data,
(uint64_t)sym_op->auth.digest.length);
(uint64_t)session->digest_length);
} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
sym_op->m_dst : sym_op->m_src,
sym_op->auth.digest.length);
session->digest_length);
if (!auth_tag) {
GCM_LOG_ERR("auth_tag");
@ -319,7 +323,7 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
aesni_gcm_dec[session->key].finalize(&session->gdata,
auth_tag,
(uint64_t)sym_op->auth.digest.length);
(uint64_t)session->digest_length);
}
return 0;
@ -349,21 +353,21 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
m->data_len - op->sym->auth.digest.length);
m->data_len - session->digest_length);
#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
rte_hexdump(stdout, "auth tag (orig):",
op->sym->auth.digest.data, op->sym->auth.digest.length);
op->sym->auth.digest.data, session->digest_length);
rte_hexdump(stdout, "auth tag (calc):",
tag, op->sym->auth.digest.length);
tag, session->digest_length);
#endif
if (memcmp(tag, op->sym->auth.digest.data,
op->sym->auth.digest.length) != 0)
session->digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
rte_pktmbuf_trim(m, op->sym->auth.digest.length);
rte_pktmbuf_trim(m, session->digest_length);
}
}

View File

@ -97,6 +97,8 @@ struct aesni_gcm_session {
/**< IV parameters */
uint16_t aad_length;
/**< AAD length */
uint16_t digest_length;
/**< Digest length */
enum aesni_gcm_operation op;
/**< GCM operation type */
enum aesni_gcm_key key;

View File

@ -452,6 +452,9 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
return -EINVAL;
}
/* Set the digest length */
sess->auth.digest_length = auth_xform->auth.digest_length;
/* Verify supported key lengths and extract proper algorithm */
switch (cipher_xform->cipher.key.length << 3) {
case 128:
@ -649,7 +652,7 @@ process_armv8_chained_op
}
} else {
adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
op->sym->auth.digest.length);
sess->auth.digest_length);
}
arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@ -667,12 +670,12 @@ process_armv8_chained_op
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(adst, op->sym->auth.digest.data,
op->sym->auth.digest.length) != 0) {
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(m_asrc,
op->sym->auth.digest.length);
sess->auth.digest_length);
}
}

View File

@ -199,6 +199,8 @@ struct armv8_crypto_session {
/**< HMAC key (max supported length)*/
} hmac;
};
uint16_t digest_length;
/* Digest length */
} auth;
} __rte_cache_aligned;

View File

@ -84,7 +84,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
struct sec_flow_context *flc;
uint32_t auth_only_len = sym_op->auth.data.length -
sym_op->cipher.data.length;
int icv_len = sym_op->auth.digest.length;
int icv_len = sess->digest_length;
uint8_t *old_icv;
uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
@ -135,7 +135,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
sym_op->auth.data.offset,
sym_op->auth.data.length,
sym_op->auth.digest.length,
sess->digest_length,
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
sess->iv.length,
@ -161,7 +161,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
DPAA2_SET_FLE_ADDR(sge,
DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
sge->length = sym_op->auth.digest.length;
sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
sess->iv.length));
}
@ -177,7 +177,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
fle->length = (sess->dir == DIR_ENC) ?
(sym_op->auth.data.length + sess->iv.length) :
(sym_op->auth.data.length + sess->iv.length +
sym_op->auth.digest.length);
sess->digest_length);
/* Configure Input SGE for Encap/Decap */
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
@ -192,12 +192,12 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->auth.digest.data,
sym_op->auth.digest.length);
memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
sess->digest_length);
memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
sge->length = sym_op->auth.digest.length;
sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
sym_op->auth.digest.length +
sess->digest_length +
sess->iv.length));
}
DPAA2_SET_FLE_FIN(sge);
@ -217,7 +217,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
uint32_t mem_len = (sess->dir == DIR_ENC) ?
(3 * sizeof(struct qbman_fle)) :
(5 * sizeof(struct qbman_fle) +
sym_op->auth.digest.length);
sess->digest_length);
struct sec_flow_context *flc;
struct ctxt_priv *priv = sess->ctxt;
uint8_t *old_digest;
@ -251,7 +251,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
fle->length = sym_op->auth.digest.length;
fle->length = sess->digest_length;
DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
DPAA2_SET_FD_COMPOUND_FMT(fd);
@ -282,17 +282,17 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
sym_op->auth.digest.length);
sess->digest_length);
sge->length = sym_op->auth.data.length;
sge++;
old_digest = (uint8_t *)(sge + 1);
rte_memcpy(old_digest, sym_op->auth.digest.data,
sym_op->auth.digest.length);
memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
sess->digest_length);
memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
sge->length = sym_op->auth.digest.length;
sge->length = sess->digest_length;
fle->length = sym_op->auth.data.length +
sym_op->auth.digest.length;
sess->digest_length;
DPAA2_SET_FLE_FIN(sge);
}
DPAA2_SET_FLE_FIN(fle);
@ -912,6 +912,8 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
session->digest_length = xform->auth.digest_length;
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;
@ -1064,6 +1066,8 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
session->digest_length = auth_xform->digest_length;
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;

View File

@ -191,6 +191,7 @@ typedef struct dpaa2_sec_session_entry {
uint16_t length; /**< IV length in bytes */
uint16_t offset; /**< IV offset in bytes */
} iv;
uint16_t digest_length;
uint8_t status;
union {
struct dpaa2_sec_cipher_ctxt cipher_ctxt;

View File

@ -132,6 +132,12 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
/* Only KASUMI F9 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
return -EINVAL;
if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
KASUMI_LOG_ERR("Wrong digest length");
return -EINVAL;
}
sess->auth_op = auth_xform->auth.op;
sess->auth_iv_offset = auth_xform->auth.iv.offset;
@ -261,12 +267,6 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
uint8_t direction;
for (i = 0; i < num_ops; i++) {
if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
KASUMI_LOG_ERR("digest");
break;
}
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@ -288,19 +288,19 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
ops[i]->sym->auth.digest.length);
KASUMI_DIGEST_LENGTH);
sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
iv, src,
length_in_bits, dst, direction);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
ops[i]->sym->auth.digest.length) != 0)
KASUMI_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
ops[i]->sym->auth.digest.length);
KASUMI_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;

View File

@ -371,6 +371,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
}
sess->auth.aad_length = xform->auth.add_auth_data_length;
sess->auth.digest_length = xform->auth.digest_length;
return 0;
}
@ -1130,7 +1131,7 @@ process_openssl_auth_op
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
op->sym->auth.digest.length);
sess->auth.digest_length);
else {
dst = op->sym->auth.digest.data;
if (dst == NULL)
@ -1158,11 +1159,11 @@ process_openssl_auth_op
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(dst, op->sym->auth.digest.data,
op->sym->auth.digest.length) != 0) {
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(mbuf_src, op->sym->auth.digest.length);
rte_pktmbuf_trim(mbuf_src, sess->auth.digest_length);
}
if (status != 0)

View File

@ -165,6 +165,8 @@ struct openssl_session {
uint16_t aad_length;
/**< AAD length */
uint16_t digest_length;
/**< digest length */
} auth;
} __rte_cache_aligned;

View File

@ -135,6 +135,7 @@ struct qat_session {
uint16_t offset;
uint16_t length;
} auth_iv;
uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
};

View File

@ -606,6 +606,7 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
auth_xform->op))
goto error_out;
}
session->digest_length = auth_xform->digest_length;
return session;
error_out:
@ -1200,7 +1201,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
ctx->auth_iv.length);
}
rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
op->sym->auth.digest.length);
ctx->digest_length);
rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
ctx->aad_len);
}

View File

@ -132,6 +132,12 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
/* Only SNOW 3G UIA2 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
return -EINVAL;
if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
SNOW3G_LOG_ERR("Wrong digest length");
return -EINVAL;
}
sess->auth_op = auth_xform->auth.op;
if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
@ -252,12 +258,6 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
uint8_t *iv;
for (i = 0; i < num_ops; i++) {
if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
SNOW3G_LOG_ERR("digest");
break;
}
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@ -274,19 +274,19 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
ops[i]->sym->auth.digest.length);
SNOW3G_DIGEST_LENGTH);
sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
iv, src,
length_in_bits, dst);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
ops[i]->sym->auth.digest.length) != 0)
SNOW3G_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
ops[i]->sym->auth.digest.length);
SNOW3G_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;

View File

@ -131,6 +131,12 @@ zuc_set_session_parameters(struct zuc_session *sess,
/* Only ZUC EIA3 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
return -EINVAL;
if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
ZUC_LOG_ERR("Wrong digest length");
return -EINVAL;
}
sess->auth_op = auth_xform->auth.op;
if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
@ -249,12 +255,6 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
uint8_t *iv;
for (i = 0; i < num_ops; i++) {
if (unlikely(ops[i]->sym->auth.digest.length != ZUC_DIGEST_LENGTH)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
ZUC_LOG_ERR("digest");
break;
}
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@ -271,19 +271,19 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
ops[i]->sym->auth.digest.length);
ZUC_DIGEST_LENGTH);
sso_zuc_eia3_1_buffer(session->pKey_hash,
iv, src,
length_in_bits, dst);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
ops[i]->sym->auth.digest.length) != 0)
ZUC_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
ops[i]->sym->auth.digest.length);
ZUC_DIGEST_LENGTH);
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;

View File

@ -140,7 +140,6 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.length = sa->digest_len;
return 0;
}
@ -368,7 +367,6 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.length = sa->digest_len;
return 0;
}

View File

@ -481,7 +481,6 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
op->sym->auth.digest.length = cparams->digest_length;
/* For wireless algorithms, offset/length must be in bits */
if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||

View File

@ -354,7 +354,7 @@ struct rte_crypto_auth_xform {
* (for example RFC 2104, FIPS 198a).
*/
uint32_t digest_length;
uint16_t digest_length;
/**< Length of the digest to be returned. If the verify option is set,
* this specifies the length of the digest to be compared for the
* session.
@ -604,10 +604,6 @@ struct rte_crypto_sym_op {
*/
phys_addr_t phys_addr;
/**< Physical address of digest */
uint16_t length;
/**< Length of digest. This must be the same value as
* @ref rte_crypto_auth_xform.digest_length.
*/
} digest; /**< Digest parameters */
struct {

View File

@ -1308,7 +1308,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
sym_op->auth.digest.data = ut_params->digest;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
sym_op->auth.data.offset = 0;
sym_op->auth.data.length = QUOTE_512_BYTES;
@ -1460,7 +1459,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
sym_op->auth.digest.data = ut_params->digest;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
sym_op->auth.data.offset = 0;
sym_op->auth.data.length = QUOTE_512_BYTES;
@ -2103,7 +2101,6 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
ut_params->digest = sym_op->auth.digest.data;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, data_pad_len);
sym_op->auth.digest.length = auth_tag_len;
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
else
@ -2111,7 +2108,7 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
auth_tag_len);
sym_op->auth.data.length = auth_len;
sym_op->auth.data.offset = auth_offset;
@ -2160,7 +2157,6 @@ create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
ut_params->digest = sym_op->auth.digest.data;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, data_pad_len);
sym_op->auth.digest.length = auth_tag_len;
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
else
@ -2168,7 +2164,7 @@ create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
auth_tag_len);
/* Copy cipher and auth IVs at the end of the crypto operation */
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op, uint8_t *,
@ -2228,7 +2224,6 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
ut_params->digest = sym_op->auth.digest.data;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, data_pad_len);
sym_op->auth.digest.length = auth_tag_len;
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
else
@ -2236,7 +2231,7 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
auth_tag_len);
/* Copy cipher and auth IVs at the end of the crypto operation */
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op, uint8_t *,
@ -2287,13 +2282,12 @@ create_wireless_algo_auth_cipher_operation(unsigned int auth_tag_len,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, data_pad_len);
sym_op->auth.digest.length = auth_tag_len;
memset(sym_op->auth.digest.data, 0, auth_tag_len);
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
auth_tag_len);
/* Copy cipher and auth IVs at the end of the crypto operation */
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op, uint8_t *,
@ -4825,7 +4819,6 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
ut_params->ibuf,
plaintext_pad_len +
aad_pad_len);
sym_op->auth.digest.length = tdata->auth_tag.len;
} else {
sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
ut_params->ibuf, tdata->auth_tag.len);
@ -4834,13 +4827,12 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf,
plaintext_pad_len + aad_pad_len);
sym_op->auth.digest.length = tdata->auth_tag.len;
rte_memcpy(sym_op->auth.digest.data, tdata->auth_tag.data,
tdata->auth_tag.len);
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
tdata->auth_tag.len);
}
sym_op->cipher.data.length = tdata->plaintext.len;
@ -5615,7 +5607,6 @@ static int MD5_HMAC_create_op(struct crypto_unittest_params *ut_params,
"no room to append digest");
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, plaintext_pad_len);
sym_op->auth.digest.length = MD5_DIGEST_LEN;
if (ut_params->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
rte_memcpy(sym_op->auth.digest.data, test_case->auth_tag.data,
@ -6326,14 +6317,13 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, aad_pad_len);
sym_op->auth.digest.length = tdata->gmac_tag.len;
if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
tdata->gmac_tag.len);
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
tdata->gmac_tag.len);
}
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
@ -6811,7 +6801,6 @@ create_auth_operation(struct crypto_testsuite_params *ts_params,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, reference->plaintext.len);
sym_op->auth.digest.length = reference->digest.len;
if (auth_generate)
memset(sym_op->auth.digest.data, 0, reference->digest.len);
@ -6822,7 +6811,7 @@ create_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
reference->digest.len);
sym_op->auth.data.length = reference->plaintext.len;
sym_op->auth.data.offset = 0;
@ -6869,7 +6858,6 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, reference->ciphertext.len);
sym_op->auth.digest.length = reference->digest.len;
if (auth_generate)
memset(sym_op->auth.digest.data, 0, reference->digest.len);
@ -6880,7 +6868,7 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
reference->digest.len);
rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
reference->iv.data, reference->iv.len);
@ -6923,7 +6911,6 @@ create_cipher_auth_operation(struct crypto_testsuite_params *ts_params,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, reference->ciphertext.len);
sym_op->auth.digest.length = reference->digest.len;
if (auth_generate)
memset(sym_op->auth.digest.data, 0, reference->digest.len);
@ -6934,7 +6921,7 @@ create_cipher_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
reference->digest.len);
rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
reference->iv.data, reference->iv.len);
@ -7171,14 +7158,13 @@ create_gcm_operation_SGL(enum rte_crypto_cipher_operation op,
"no room to append digest");
sym_op->auth.digest.phys_addr = digest_phys;
sym_op->auth.digest.length = auth_tag_len;
if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
rte_memcpy(sym_op->auth.digest.data, tdata->auth_tag.data,
auth_tag_len);
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
auth_tag_len);
}
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,

View File

@ -325,7 +325,6 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
sym_op->auth.data.offset = 0;
sym_op->auth.data.length = tdata->ciphertext.len;
sym_op->auth.digest.length = digest_len;
}
/* create session for sessioned op */
@ -475,7 +474,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
sym_op->auth.data.offset;
changed_len = sym_op->auth.data.length;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
changed_len += sym_op->auth.digest.length;
changed_len += digest_len;
} else {
/* cipher-only */
head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
@ -517,7 +516,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
changed_len += sym_op->auth.digest.length;
changed_len += digest_len;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY) {
/* white-box test: PMDs use some of the

View File

@ -168,20 +168,19 @@ static struct rte_mbuf *
test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned data_len,
unsigned digest_len);
struct rte_cryptodev_sym_session *sess, unsigned int data_len);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
unsigned int digest_len, enum chain_mode chain);
enum chain_mode chain);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
unsigned int digest_len, enum chain_mode chain __rte_unused);
enum chain_mode chain __rte_unused);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
unsigned int digest_len, enum chain_mode chain __rte_unused);
enum chain_mode chain __rte_unused);
static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo);
@ -1979,7 +1978,6 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
op->sym->auth.digest.data = ut_params->digest;
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
op->sym->auth.data.offset = 0;
op->sym->auth.data.length = data_params[0].length;
@ -2102,8 +2100,7 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
op = test_perf_set_crypto_op_snow3g(op, m, sess, pparams->buf_size,
get_auth_digest_length(pparams->auth_algo));
op = test_perf_set_crypto_op_snow3g(op, m, sess, pparams->buf_size);
TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
c_ops[i] = op;
@ -2252,11 +2249,9 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
static struct rte_crypto_op *(*test_perf_set_crypto_op)
(struct rte_crypto_op *, struct rte_mbuf *,
struct rte_cryptodev_sym_session *,
unsigned int, unsigned int,
unsigned int,
enum chain_mode);
unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
if (rte_cryptodev_count() == 0) {
printf("\nNo crypto devices found. Is PMD build configured?\n");
return TEST_FAILED;
@ -2298,7 +2293,7 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
}
op = test_perf_set_crypto_op(op, m, sess, pparams->buf_size,
digest_length, pparams->chain);
pparams->chain);
TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
c_ops[i] = op;
@ -2407,8 +2402,6 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
static struct rte_cryptodev_sym_session *sess;
unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
if (rte_cryptodev_count() == 0) {
printf("\nNo crypto devices found. Is PMD build configured?\n");
return TEST_FAILED;
@ -2433,7 +2426,7 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
op = test_perf_set_crypto_op_aes(op, m, sess, pparams->buf_size,
digest_length, pparams->chain);
pparams->chain);
TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
c_ops[i] = op;
@ -2875,7 +2868,7 @@ test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz)
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
unsigned int digest_len, enum chain_mode chain)
enum chain_mode chain)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@ -2886,7 +2879,6 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
if (chain == CIPHER_ONLY) {
op->sym->auth.digest.data = NULL;
op->sym->auth.digest.phys_addr = 0;
op->sym->auth.digest.length = 0;
op->sym->auth.aad.data = NULL;
op->sym->auth.data.offset = 0;
op->sym->auth.data.length = 0;
@ -2895,7 +2887,6 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
uint8_t *, data_len);
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_len);
op->sym->auth.digest.length = digest_len;
op->sym->auth.data.offset = 0;
op->sym->auth.data.length = data_len;
}
@ -2917,7 +2908,7 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
unsigned int digest_len, enum chain_mode chain __rte_unused)
enum chain_mode chain __rte_unused)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@ -2929,7 +2920,6 @@ test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
(m->data_off + data_len);
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
op->sym->auth.digest.length = digest_len;
op->sym->auth.aad.data = aes_gcm_aad;
/* Copy IV at the end of the crypto operation */
@ -2950,8 +2940,7 @@ test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned data_len,
unsigned digest_len)
struct rte_cryptodev_sym_session *sess, unsigned int data_len)
{
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
uint8_t *, IV_OFFSET);
@ -2968,7 +2957,6 @@ test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
(m->data_off + data_len);
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
op->sym->auth.digest.length = digest_len;
/* Data lengths/offsets Parameters */
op->sym->auth.data.offset = 0;
@ -3015,8 +3003,7 @@ static inline struct rte_crypto_op *
test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess,
unsigned data_len,
unsigned digest_len)
unsigned int data_len)
{
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
uint8_t *, IV_OFFSET);
@ -3036,7 +3023,6 @@ test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len +
SNOW3G_CIPHER_IV_LENGTH);
op->sym->auth.digest.length = digest_len;
/* Data lengths/offsets Parameters */
op->sym->auth.data.offset = 0;
@ -3051,7 +3037,7 @@ test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
unsigned int digest_len, enum chain_mode chain __rte_unused)
enum chain_mode chain __rte_unused)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@ -3063,7 +3049,6 @@ test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
(m->data_off + data_len);
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
op->sym->auth.digest.length = digest_len;
/* Copy IV at the end of the crypto operation */
rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
@ -3156,7 +3141,7 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op_aes(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))],
sess, pparams->buf_size, digest_length,
sess, pparams->buf_size,
pparams->chain);
/* enqueue burst */
@ -3298,7 +3283,7 @@ test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
mbufs[i +
(pparams->burst_size * (j % NUM_MBUF_SETS))],
sess,
pparams->buf_size, digest_length);
pparams->buf_size);
else if (pparams->chain == CIPHER_ONLY)
ops[i+op_offset] =
test_perf_set_crypto_op_snow3g_cipher(ops[i+op_offset],
@ -3394,8 +3379,6 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
uint64_t processed = 0, failed_polls = 0, retries = 0;
uint64_t tsc_start = 0, tsc_end = 0;
unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
struct rte_crypto_op *ops[pparams->burst_size];
struct rte_crypto_op *proc_ops[pparams->burst_size];
@ -3408,7 +3391,7 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
static struct rte_crypto_op *(*test_perf_set_crypto_op)
(struct rte_crypto_op *, struct rte_mbuf *,
struct rte_cryptodev_sym_session *,
unsigned int, unsigned int,
unsigned int,
enum chain_mode);
switch (pparams->cipher_algo) {
@ -3470,7 +3453,7 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))],
sess, pparams->buf_size, digest_length,
sess, pparams->buf_size,
pparams->chain);
/* enqueue burst */
@ -3548,8 +3531,6 @@ test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
uint64_t processed = 0, failed_polls = 0, retries = 0;
uint64_t tsc_start = 0, tsc_end = 0;
unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
struct rte_crypto_op *ops[pparams->burst_size];
struct rte_crypto_op *proc_ops[pparams->burst_size];
@ -3604,7 +3585,7 @@ test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op_aes(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))], sess,
pparams->buf_size, digest_length,
pparams->buf_size,
pparams->chain);
/* enqueue burst */
@ -4179,7 +4160,6 @@ perf_gcm_set_crypto_op(struct rte_crypto_op *op, struct rte_mbuf *m,
params->session_attrs->aad_len +
params->symmetric_op->p_len);
op->sym->auth.digest.length = params->symmetric_op->t_len;
op->sym->auth.aad.data = m_hlp->aad;
op->sym->auth.aad.phys_addr = rte_pktmbuf_mtophys(m);