crypto/qat: support single-pass GMAC on GEN3

This patch implements Single-Pass AES-GMAC on QAT GEN3, which
improves performance. On GEN1 and GEN2 the previous chained
method is still used.

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
commit 45fe9ea971 (parent abc64f1075)
Author: Adam Dybkowski, 2021-04-14 12:33:21 +01:00
Committed by: Akhil Goyal
6 changed files with 132 additions and 28 deletions
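
For context, the sketch below (illustrative only, not part of the patch) shows an
application-side rte_cryptodev auth transform that satisfies the conditions this
patch checks before taking the single-pass path on a GEN3 device:
RTE_CRYPTO_AUTH_AES_GMAC with a 96-bit (12-byte) IV. The key size, digest length
and IV offset are assumptions made for the example. At enqueue time the operation
must also have op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE (16383
bytes); longer buffers keep using the chained method.

    #include <rte_crypto.h>

    /* Illustrative application-side values, not taken from the patch */
    #define GMAC_IV_OFFSET (sizeof(struct rte_crypto_op) + \
    		sizeof(struct rte_crypto_sym_op))

    static uint8_t gmac_key[16];	/* AES-128 key, filled by the application */

    /* Auth-only transform that can map to the single-pass GMAC path on GEN3 */
    static const struct rte_crypto_sym_xform gmac_auth_xform = {
    	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
    	.next = NULL,
    	.auth = {
    		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
    		.algo = RTE_CRYPTO_AUTH_AES_GMAC,
    		.key = {
    			.data = gmac_key,
    			.length = sizeof(gmac_key),
    		},
    		.iv = {
    			.offset = GMAC_IV_OFFSET,
    			.length = 12,	/* matches QAT_AES_GCM_SPC_IV_SIZE */
    		},
    		.digest_length = 16,
    	},
    };

Creating a session from such an xform on a GEN3 QAT device sets
session->is_single_pass_gmac in qat_sym_session_set_parameters() (see the
qat_sym_session.c hunk below).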

@@ -862,7 +862,8 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 		nb_fw_responses = 1;

 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg);
+			qat_sym_process_response(ops, resp_msg,
+				tmp_qp->op_cookies[head >> rx_queue->trailz]);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,

@@ -144,6 +144,72 @@ set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
 			iv_length);
 }

+/** Handle Single-Pass AES-GMAC on QAT GEN3 */
+static inline void
+handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
+		struct qat_sym_op_cookie *cookie,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	static const uint32_t ver_key_offset =
+			sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
+			(void *) &qat_req->cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *) &qat_req->serv_specif_rqpars;
+	uint32_t data_length = op->sym->auth.data.length;
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
+
+	/* Update the request */
+	qat_req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	qat_req->comn_mid.src_length = data_length;
+	qat_req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
+	cipher_param->spc_aad_sz = data_length;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			qat_req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			qat_req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen)
@@ -619,11 +685,15 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 			qat_req->comn_mid.dest_data_addr = dst_buf_start;
 	}

-	/* Handle Single-Pass GCM */
 	if (ctx->is_single_pass) {
+		/* Handle Single-Pass GCM */
 		cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
 		cipher_param->spc_auth_res_addr =
 				op->sym->aead.digest.phys_addr;
+	} else if (ctx->is_single_pass_gmac &&
+			op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
+		/* Handle Single-Pass AES-GMAC */
+		handle_spc_gmac(ctx, op, cookie, qat_req);
 	}

 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG

@@ -29,6 +29,9 @@
  */
 #define QAT_SYM_SGL_MAX_NUMBER 16

+/* Maximum data length for single pass GMAC: 2^14-1 */
+#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+
 struct qat_sym_session;

 struct qat_sym_sgl {
@@ -41,6 +44,14 @@ struct qat_sym_op_cookie {
 	struct qat_sym_sgl qat_sgl_dst;
 	phys_addr_t qat_sgl_src_phys_addr;
 	phys_addr_t qat_sgl_dst_phys_addr;
+	union {
+		/* Used for Single-Pass AES-GMAC only */
+		struct {
+			struct icp_qat_hw_cipher_algo_blk cd_cipher
+					__rte_packed __rte_cache_aligned;
+			phys_addr_t cd_phys_addr;
+		} spc_gmac;
+	} opt;
 };

 int
@@ -212,46 +223,46 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 #endif

 static inline void
-qat_sym_process_response(void **op, uint8_t *resp)
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
 	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
 			(resp_msg->opaque_data);
 	struct qat_sym_session *sess;
+	uint8_t is_docsis_sec;

 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
 			sizeof(struct icp_qat_fw_comn_resp));
 #endif

+#ifdef RTE_LIB_SECURITY
+	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		/*
+		 * Assuming at this point that if it's a security
+		 * op, that this is for DOCSIS
+		 */
+		sess = (struct qat_sym_session *)
+				get_sec_session_private_data(
+						rx_op->sym->sec_session);
+		is_docsis_sec = 1;
+	} else
+#endif
+	{
+		sess = (struct qat_sym_session *)
+				get_sym_session_private_data(
+						rx_op->sym->session,
+						qat_sym_driver_id);
+		is_docsis_sec = 0;
+	}
+
 	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
 			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
 			resp_msg->comn_hdr.comn_status)) {
 		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 	} else {
-#ifdef RTE_LIB_SECURITY
-		uint8_t is_docsis_sec = 0;
-
-		if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-			/*
-			 * Assuming at this point that if it's a security
-			 * op, that this is for DOCSIS
-			 */
-			sess = (struct qat_sym_session *)
-				get_sec_session_private_data(
-						rx_op->sym->sec_session);
-			is_docsis_sec = 1;
-		} else
-#endif
-		{
-			sess = (struct qat_sym_session *)
-				get_sym_session_private_data(
-						rx_op->sym->session,
-						qat_sym_driver_id);
-		}
-
 		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

 		if (sess->bpi_ctx) {
@@ -262,6 +273,14 @@ qat_sym_process_response(void **op, uint8_t *resp)
 #endif
 		}
 	}

+	if (sess->is_single_pass_gmac) {
+		struct qat_sym_op_cookie *cookie =
+				(struct qat_sym_op_cookie *) op_cookie;
+
+		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
+			sess->auth_key_length);
+	}
+
 	*op = (void *)rx_op;
 }
@@ -283,7 +302,8 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 }

 static inline void
-qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused)
 {
 }

@@ -211,6 +211,12 @@ static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 				rte_mempool_virt2iova(cookie) +
 				offsetof(struct qat_sym_op_cookie,
 				qat_sgl_dst);
+
+		cookie->opt.spc_gmac.cd_phys_addr =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_sym_op_cookie,
+				opt.spc_gmac.cd_cipher);
 	}

 	/* Get fw version from QAT (GEN2), skip if we've got it already */

@@ -537,6 +537,8 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
 {
 	struct qat_sym_session *session = session_private;
+	struct qat_sym_dev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
@@ -571,6 +573,10 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		ret = qat_sym_session_configure_auth(dev, xform, session);
 		if (ret < 0)
 			return ret;
+		session->is_single_pass_gmac =
+				qat_dev_gen == QAT_GEN3 &&
+				xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
+				xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
 		break;
 	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
@@ -706,8 +712,9 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 	struct qat_sym_dev_private *internals = dev->data->dev_private;
 	const uint8_t *key_data = auth_xform->key.data;
 	uint8_t key_length = auth_xform->key.length;
-	session->aes_cmac = 0;

+	session->aes_cmac = 0;
+	session->auth_key_length = auth_xform->key.length;
 	session->auth_iv.offset = auth_xform->iv.offset;
 	session->auth_iv.length = auth_xform->iv.length;
 	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
@@ -765,7 +772,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
 		if (session->auth_iv.length == 0)
 			session->auth_iv.length = AES_GCM_J0_LEN;
-
 		break;
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;

@@ -36,7 +36,6 @@
 /* 96-bit case of IV for CCP/GCM single pass algorithm */
 #define QAT_AES_GCM_SPC_IV_SIZE 12

-
 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
 			ICP_QAT_HW_CIPHER_NO_CONVERT, \
@@ -86,11 +85,13 @@ struct qat_sym_session {
 		uint16_t offset;
 		uint16_t length;
 	} auth_iv;
+	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
 	enum qat_device_gen min_qat_dev_gen;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
+	uint8_t is_single_pass_gmac;
 };

 int