crypto/qat: support AES-CCM

This patch adds the AES-CCM AEAD cipher and hash algorithm to the
Intel QuickAssist Technology driver.

Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Arek Kusztal, 2017-09-21 14:11:20 +01:00; committed by Pablo de Lara
parent 1a4998dc4d
commit ab56c4d9ed
5 changed files with 237 additions and 14 deletions

@@ -89,6 +89,10 @@ New Features
  * Coalesce writes to HEAD CSR on response processing.
  * Coalesce writes to TAIL CSR on request processing.

  Additional support for:

  * AES CCM algorithm.

* **Updated the AESNI MB PMD.**

  The AESNI MB PMD has been updated with additional support for:

@@ -301,6 +301,26 @@ enum icp_qat_hw_cipher_convert {
#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
/* These defines describe the position of the bit-fields
* in the flags byte of B0
*/
#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
| ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
| ((q) - 1))
#define ICP_QAT_HW_CCM_NQ_CONST 15
#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
ICP_QAT_HW_CCM_AAD_LEN_INFO)
#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
struct icp_qat_hw_cipher_algo_blk {
struct icp_qat_hw_cipher_config cipher_config;
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
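
For reference, a minimal, self-contained sketch of how the B0 flags byte described by these defines is assembled. The local CCM_B0_FLAGS macro and the main() wrapper are illustrative only (not part of the driver) and simply mirror ICP_QAT_HW_CCM_BUILD_B0_FLAGS:

#include <stdint.h>
#include <stdio.h>

/* Illustrative copy of ICP_QAT_HW_CCM_BUILD_B0_FLAGS:
 * bit 6     = Adata (1 if any AAD is present)
 * bits 3..5 = (t - 2) / 2, where t is the tag (digest) length
 * bits 0..2 = q - 1, where q is the size of the message-length field
 */
#define CCM_B0_FLAGS(Adata, t, q) \
	((((Adata) > 0 ? 1 : 0) << 6) | ((((t) - 2) >> 1) << 3) | ((q) - 1))

int main(void)
{
	/* AAD present, 16-byte tag, 12-byte nonce => q = 15 - 12 = 3 */
	uint8_t flags = CCM_B0_FLAGS(1, 16, 3);

	printf("B0 flags byte: 0x%02X\n", flags); /* prints 0x7A */
	return 0;
}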

@@ -124,6 +124,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_NULL:
return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -876,6 +879,31 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
ICP_QAT_HW_AUTH_ALGO_NULL);
state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
if (aad_length > 0) {
aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
ICP_QAT_HW_CCM_AAD_LEN_INFO;
auth_param->u2.aad_sz =
RTE_ALIGN_CEIL(aad_length,
ICP_QAT_HW_CCM_AAD_ALIGNMENT);
} else {
auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
}
cdesc->aad_len = aad_length;
hash->auth_counter.counter = 0;
hash_cd_ctrl->outer_prefix_sz = digestsize;
auth_param->hash_state_sz = digestsize;
memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
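
The AES_CBC_MAC (CCM) case above sizes the firmware AAD buffer to hold the 16-byte B0 block, the 2-byte encoded AAD length and the AAD itself, rounded up to a 16-byte boundary. Below is a minimal sketch of that computation, assuming DPDK's RTE_ALIGN_CEIL semantics; the helper name ccm_aad_buffer_sz is hypothetical:

#include <stdint.h>
#include <rte_common.h>

/* Mirrors the aad_sz computation in the AES_CBC_MAC (CCM) case above */
static uint16_t ccm_aad_buffer_sz(uint16_t aad_length)
{
	if (aad_length == 0)
		return 16;	/* B0 block only, no AAD length field */

	/* 16-byte B0 block + 2-byte AAD length + AAD, 16-byte aligned */
	return RTE_ALIGN_CEIL(aad_length + 16 + 2, 16);
}

For example, 20 bytes of AAD gives RTE_ALIGN_CEIL(38, 16) = 48 bytes.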

@@ -59,6 +59,7 @@
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_cryptodev_pci.h>
#include <rte_byteorder.h>
#include <openssl/evp.h>
#include "qat_logs.h"
@@ -251,10 +252,21 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
/* AEAD */
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
/* AES-GCM and AES-CCM work with different ordering:
* for encryption, GCM first encrypts and then generates the hash,
* whereas AES-CCM first generates the hash and then encrypts.
* A similar relation applies to decryption.
*/
if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
else
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
else
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
else
return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
}
if (xform->next == NULL)
@@ -734,6 +746,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
struct qat_session *session)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
enum rte_crypto_auth_operation crypto_operation;
/*
* Store AEAD IV parameters as cipher IV,
@@ -753,21 +766,33 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
aead_xform->algo);
return -ENOTSUP;
if (qat_alg_validate_aes_key(aead_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES key size");
return -EINVAL;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
break;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
aead_xform->algo);
return -EINVAL;
}
if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
/*
* It needs to create cipher desc content first,
* then authentication
*/
crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
if (qat_alg_aead_session_create_content_desc_cipher(session,
aead_xform->key.data,
aead_xform->key.length))
@@ -778,7 +803,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
RTE_CRYPTO_AUTH_OP_GENERATE))
crypto_operation))
return -EINVAL;
} else {
session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
@@ -786,12 +811,16 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
* It needs to create authentication desc content first,
* then cipher
*/
crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
if (qat_alg_aead_session_create_content_desc_auth(session,
aead_xform->key.data,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
RTE_CRYPTO_AUTH_OP_VERIFY))
crypto_operation))
return -EINVAL;
if (qat_alg_aead_session_create_content_desc_cipher(session,
@@ -1043,7 +1072,6 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
sizeof(struct icp_qat_fw_comn_resp));
#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
@@ -1153,6 +1181,29 @@ set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
}
}
/** Setting the IV for CCM is a special case: byte 0 of the counter block
* is set to q-1, where q is the size of the message-length field
* (q = 15 - nonce length)
*/
static inline void
set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
struct icp_qat_fw_la_cipher_req_params *cipher_param,
struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
{
rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
ICP_QAT_HW_CCM_NONCE_OFFSET,
rte_crypto_op_ctod_offset(op, uint8_t *,
iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
iv_length);
*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
q - ICP_QAT_HW_CCM_NONCE_OFFSET;
if (aad_len_field_sz)
rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
rte_crypto_op_ctod_offset(op, uint8_t *,
iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
iv_length);
}
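
To make the layout concrete, the 16-byte counter block written into cipher_IV_array by set_cipher_iv_ccm() for a 12-byte nonce (so q = 15 - 12 = 3) is sketched below; this follows standard CCM counter-block formatting and is descriptive only:

/*
 * cipher_IV_array layout for a 12-byte nonce, q = 3:
 *
 *   byte  0      : flags = q - 1 = 2
 *   bytes 1..12  : nonce, copied from the IV at iv_offset + 1 in the op
 *   bytes 13..15 : the q-byte block counter field
 */
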
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
@@ -1197,6 +1248,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
return -EINVAL;
}
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -1205,9 +1258,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
/* AES-GCM */
/* AES-GCM or AES-CCM */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
&& ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
do_aead = 1;
} else {
do_auth = 1;
@@ -1314,6 +1371,11 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
if (do_aead) {
/*
* This address is used to set the AAD physical pointer; for CCM with
* no AAD data it is later redirected to the IV offset within the op
*/
phys_addr_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
@@ -1327,6 +1389,87 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
/* In the AES-CCM case this may point to user-selected memory
* or to the IV offset in the crypto op
*/
uint8_t *aad_data = op->sym->aead.aad.data;
/* This is the true AAD length; it does not include the 18 bytes
* of preceding data (16-byte B0 block plus 2-byte encoded AAD length)
*/
uint8_t aad_ccm_real_len = 0;
uint8_t aad_len_field_sz = 0;
uint32_t msg_len_be =
rte_bswap32(op->sym->aead.data.length);
if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
aad_ccm_real_len = ctx->aad_len -
ICP_QAT_HW_CCM_AAD_B0_LEN -
ICP_QAT_HW_CCM_AAD_LEN_INFO;
} else {
/*
* aad_len is not greater than 18, so there is no actual AAD data;
* use the IV placed after the op to build the B0 block
*/
aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
ctx->cipher_iv.offset);
aad_phys_addr_aead =
rte_crypto_op_ctophys_offset(op,
ctx->cipher_iv.offset);
}
uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
ctx->digest_length, q);
if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
memcpy(aad_data + ctx->cipher_iv.length +
ICP_QAT_HW_CCM_NONCE_OFFSET
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
(uint8_t *)&msg_len_be,
ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
} else {
memcpy(aad_data + ctx->cipher_iv.length +
ICP_QAT_HW_CCM_NONCE_OFFSET,
(uint8_t *)&msg_len_be
+ (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
- q), q);
}
if (aad_len_field_sz > 0) {
*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
= rte_bswap16(aad_ccm_real_len);
if ((aad_ccm_real_len + aad_len_field_sz)
% ICP_QAT_HW_CCM_AAD_B0_LEN) {
uint8_t pad_len = 0;
uint8_t pad_idx = 0;
pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
((aad_ccm_real_len + aad_len_field_sz) %
ICP_QAT_HW_CCM_AAD_B0_LEN);
pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
aad_ccm_real_len + aad_len_field_sz;
memset(&aad_data[pad_idx],
0, pad_len);
}
}
set_cipher_iv_ccm(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, q,
aad_len_field_sz);
}
cipher_len = op->sym->aead.data.length;
@@ -1334,10 +1477,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_len = op->sym->aead.data.length;
auth_ofs = op->sym->aead.data.offset;
auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
auth_param->u1.aad_adr = aad_phys_addr_aead;
auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
cipher_param, op, qat_req);
min_ofs = op->sym->aead.data.offset;
}
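
As a worked example of the B0/AAD formatting above, assume a 12-byte nonce, 20 bytes of AAD, a 16-byte digest and a 64-byte payload. Then q = 15 - 12 = 3 and the AAD buffer handed to the firmware ends up laid out as follows (offsets derived from the code above):

/*
 * aad_data[0]      = B0 flags = 0x7A  (Adata = 1, (16 - 2) / 2 = 7, q - 1 = 2)
 * aad_data[1..12]  = nonce, copied in set_cipher_iv_ccm()
 * aad_data[13..15] = payload length 64, big-endian, in q = 3 bytes
 * aad_data[16..17] = AAD length 20, big-endian (0x00 0x14)
 * aad_data[18..37] = the 20 AAD bytes supplied by the application
 * aad_data[38..47] = zero padding up to the next 16-byte boundary
 */

The matching descriptor field auth_param->u2.aad_sz was set to RTE_ALIGN_CEIL(20 + 18, 16) = 48 in qat_alg_aead_session_create_content_desc_auth().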

@@ -183,6 +183,36 @@
}, } \
}, } \
}, \
{ /* AES CCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
{.aead = { \
.algo = RTE_CRYPTO_AEAD_AES_CCM, \
.block_size = 16, \
.key_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
}, \
.digest_size = { \
.min = 4, \
.max = 16, \
.increment = 2 \
}, \
.aad_size = { \
.min = 0, \
.max = 224, \
.increment = 1 \
}, \
.iv_size = { \
.min = 7, \
.max = 13, \
.increment = 1 \
}, \
}, } \
}, } \
}, \
{ /* AES GCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \