common/cpt: prepopulate word7 in session

CPT instruction word 7 is immutable for a given session. Populate it
once at session configuration time instead of computing it per request.

Signed-off-by: Archana Muniganti <marchana@marvell.com>
Acked-by: Anoob Joseph <anoobj@marvell.com>
Authored by Archana Muniganti on 2020-11-03 14:07:14 +05:30; committed by Akhil Goyal
parent 3231a72a1d
commit 6045c06a87
9 changed files with 91 additions and 119 deletions
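
In short, the patch assembles CPT instruction word 7 (the engine group plus the DMA pointer to the microcode context) once during session configuration and stores it in the session as cpt_inst_w7, so the datapath copies it into each instruction instead of rebuilding it per request. A minimal sketch of the idea follows; the structures are trimmed stand-ins carrying only the fields relevant here, and sess_prepopulate_w7() is a hypothetical helper, not a function added by this patch.

#include <stddef.h>
#include <stdint.h>

/* Trimmed stand-in for vq_cmd_word3_t: engine group and microcode context
 * pointer packed into one 64-bit word (the real layout is in the CPT
 * hardware headers). */
typedef union {
	uint64_t u64;
	struct {
		uint64_t cptr : 61; /* DMA address of the microcode context */
		uint64_t grp  : 3;  /* engine group that serves this session */
	} s;
} vq_cmd_word3_t;

/* Trimmed stand-ins for the session structures touched by the patch. */
struct cpt_ctx {
	uint8_t mc_ctx[64];    /* placeholder for the microcode context union */
};
struct cpt_sess_misc {
	uint64_t cpt_inst_w7;  /* field added by this patch */
	uint64_t ctx_dma_addr; /* DMA address of the cpt_ctx after the misc block */
};

/* Hypothetical helper: compute word 7 once, at session configure time. */
static inline void
sess_prepopulate_w7(struct cpt_sess_misc *misc, uint8_t egrp)
{
	vq_cmd_word3_t w3;

	w3.u64 = 0;
	w3.s.grp = egrp;
	w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx, mc_ctx);
	misc->cpt_inst_w7 = w3.u64;
}

/* The enqueue path then copies the prepopulated value instead of composing
 * it per request, e.g. inst.s9x.ei3 = sess->cpt_inst_w7; */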

View File

@ -69,7 +69,6 @@ struct cpt_request_info {
uint64_t ei0;
uint64_t ei1;
uint64_t ei2;
uint64_t ei3;
} ist;
uint8_t *rptr;
const struct otx2_cpt_qp *qp;

View File

@ -245,8 +245,8 @@ struct cpt_sess_misc {
uint16_t is_null:1;
/** Flag for GMAC */
uint16_t is_gmac:1;
/** Engine group */
uint16_t egrp:3;
/** Unused field */
uint16_t rsvd1:3;
/** AAD length */
uint16_t aad_length;
/** MAC len in bytes */
@ -255,14 +255,16 @@ struct cpt_sess_misc {
uint8_t iv_length;
/** Auth IV length in bytes */
uint8_t auth_iv_length;
/** Reserved field */
uint8_t rsvd1;
/** Unused field */
uint8_t rsvd2;
/** IV offset in bytes */
uint16_t iv_offset;
/** Auth IV offset in bytes */
uint16_t auth_iv_offset;
/** Salt */
uint32_t salt;
/** CPT inst word 7 */
uint64_t cpt_inst_w7;
/** Context DMA address */
phys_addr_t ctx_dma_addr;
};
@ -319,7 +321,7 @@ struct cpt_ctx {
mc_fc_context_t fctx;
mc_zuc_snow3g_ctx_t zs_ctx;
mc_kasumi_ctx_t k_ctx;
};
} mc_ctx;
uint8_t auth_key[1024];
};
@ -350,6 +352,7 @@ struct cpt_asym_sess_misc {
struct rte_crypto_modex_xform mod_ctx;
struct cpt_asym_ec_ctx ec_ctx;
};
uint64_t cpt_inst_w7;
};
/* Buffer pointer */
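
For reference, the hunk above names the previously anonymous microcode-context union mc_ctx; the later hunks depend on this, computing the context offset as offsetof(struct cpt_ctx, mc_ctx) and reaching the per-algorithm contexts through cpt_ctx->mc_ctx.*. A simplified sketch of the resulting layout (the context typedefs are stand-ins; the real definitions live in the microcode headers):

#include <stdint.h>

/* Stand-in context types so the sketch is self-contained. */
typedef struct { uint8_t raw[64]; } mc_fc_context_t;
typedef struct { uint8_t raw[64]; } mc_zuc_snow3g_ctx_t;
typedef struct { uint8_t raw[64]; } mc_kasumi_ctx_t;

struct cpt_ctx {
	/* ... control fields such as fc_type, zsk_flags, snow3g, k_ecb ... */
	union {
		mc_fc_context_t fctx;       /* flexi-crypto (AES/HMAC) context */
		mc_zuc_snow3g_ctx_t zs_ctx; /* ZUC / SNOW 3G context */
		mc_kasumi_ctx_t k_ctx;      /* KASUMI context */
	} mc_ctx;                           /* named union; was anonymous before */
	uint8_t auth_key[1024];
};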

View File

@ -77,11 +77,11 @@ cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
}
static __rte_always_inline void
cpt_fc_salt_update(void *ctx,
cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
uint8_t *salt)
{
struct cpt_ctx *cpt_ctx = ctx;
memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
memcpy(fctx->enc.encr_iv, salt, 4);
}
static __rte_always_inline int
@ -190,10 +190,12 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
uint32_t keyx[4];
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->zsk_flags = 0;
}
@ -201,9 +203,11 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
cpt_ctx->snow3g = 0;
memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
memcpy(zs_ctx->ci_key, key, key_len);
memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->zsk_flags = 0;
}
@ -211,8 +215,10 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
cpt_ctx->k_ecb = 1;
memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
}
@ -220,16 +226,17 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline int
cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
uint16_t key_len, uint8_t *salt)
cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
const uint8_t *key, uint16_t key_len, uint8_t *salt)
{
struct cpt_ctx *cpt_ctx = ctx;
mc_fc_context_t *fctx = &cpt_ctx->fctx;
mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
int ret;
ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
@ -480,7 +487,6 @@ cpt_digest_gen_prep(uint32_t flags,
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
vq_cmd_word0_t vq_cmd_w0;
vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr, *m_vaddr;
uint64_t c_dma, m_dma;
opcode_info_t opcode;
@ -633,9 +639,6 @@ cpt_digest_gen_prep(uint32_t flags,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
/* vq command w3 */
vq_cmd_w3.u64 = 0;
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@ -643,7 +646,6 @@ cpt_digest_gen_prep(uint32_t flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@ -671,9 +673,8 @@ cpt_enc_hmac_prep(uint32_t flags,
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
uint64_t m_dma, offset_dma, ctx_dma;
uint64_t m_dma, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
opcode_info_t opcode;
@ -1003,13 +1004,6 @@ cpt_enc_hmac_prep(uint32_t flags,
req->ist.ei2 = rptr_dma;
}
ctx_dma = fc_params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, fctx);
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
vq_cmd_w3.s.cptr = ctx_dma;
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@ -1017,7 +1011,6 @@ cpt_enc_hmac_prep(uint32_t flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@ -1044,10 +1037,9 @@ cpt_dec_hmac_prep(uint32_t flags,
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
uint64_t m_dma, offset_dma, ctx_dma;
uint64_t m_dma, offset_dma;
opcode_info_t opcode;
vq_cmd_word0_t vq_cmd_w0;
vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
@ -1388,13 +1380,6 @@ cpt_dec_hmac_prep(uint32_t flags,
req->ist.ei2 = rptr_dma;
}
ctx_dma = fc_params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, fctx);
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
vq_cmd_w3.s.cptr = ctx_dma;
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@ -1402,7 +1387,6 @@ cpt_dec_hmac_prep(uint32_t flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@ -1433,7 +1417,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4];
vq_cmd_word0_t vq_cmd_w0;
vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
buf_p = &params->meta_buf;
@ -1710,12 +1693,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
req->ist.ei2 = rptr_dma;
}
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, zs_ctx);
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@ -1723,7 +1700,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@ -1753,7 +1729,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4], j;
vq_cmd_word0_t vq_cmd_w0;
vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
buf_p = &params->meta_buf;
@ -1974,12 +1949,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
req->ist.ei2 = rptr_dma;
}
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, zs_ctx);
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@ -1987,7 +1956,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@ -2019,7 +1987,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
@ -2221,12 +2188,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, k_ctx);
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@ -2234,7 +2195,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@ -2263,7 +2223,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
@ -2411,12 +2370,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, k_ctx);
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@ -2424,7 +2377,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@ -2492,11 +2444,12 @@ cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
}
static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
uint16_t key_len, uint16_t mac_len)
cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
const uint8_t *key, uint16_t key_len, uint16_t mac_len)
{
struct cpt_ctx *cpt_ctx = ctx;
mc_fc_context_t *fctx = &cpt_ctx->fctx;
mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
uint32_t keyx[4];
@ -2511,26 +2464,26 @@ cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
case SNOW3G_UIA2:
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case ZUC_EIA3:
cpt_ctx->snow3g = 0;
memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
memcpy(zs_ctx->ci_key, key, key_len);
memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_ECB:
/* Kasumi ECB mode */
cpt_ctx->k_ecb = 1;
memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_CBC:
memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;

View File

@ -210,7 +210,7 @@ get_cpt_inst(struct command_queue *cqueue)
}
static __rte_always_inline void
fill_cpt_inst(struct cpt_instance *instance, void *req)
fill_cpt_inst(struct cpt_instance *instance, void *req, uint64_t ucmd_w3)
{
struct command_queue *cqueue;
cpt_inst_s_t *cpt_ist_p;
@ -237,7 +237,7 @@ fill_cpt_inst(struct cpt_instance *instance, void *req)
/* MC EI2 */
cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
/* MC EI3 */
cpt_ist_p->s8x.ei3 = user_req->ist.ei3;
cpt_ist_p->s8x.ei3 = ucmd_w3;
}
static __rte_always_inline void

View File

@ -241,6 +241,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
{
struct rte_crypto_sym_xform *temp_xform = xform;
struct cpt_sess_misc *misc;
vq_cmd_word3_t vq_cmd_w3;
void *priv;
int ret;
@ -254,7 +255,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
}
memset(priv, 0, sizeof(struct cpt_sess_misc) +
offsetof(struct cpt_ctx, fctx));
offsetof(struct cpt_ctx, mc_ctx));
misc = priv;
@ -292,6 +293,13 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
sizeof(struct cpt_sess_misc);
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
mc_ctx);
misc->cpt_inst_w7 = vq_cmd_w3.u64;
return 0;
priv_put:
@ -372,6 +380,8 @@ otx_cpt_asym_session_cfg(struct rte_cryptodev *dev,
return ret;
}
priv->cpt_inst_w7 = 0;
set_asym_session_private_data(sess, dev->driver_id, priv);
return 0;
}
@ -401,14 +411,14 @@ otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
void *req)
void *req, uint64_t cpt_inst_w7)
{
struct cpt_request_info *user_req = (struct cpt_request_info *)req;
if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
return -EAGAIN;
fill_cpt_inst(instance, req);
fill_cpt_inst(instance, req, cpt_inst_w7);
CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
@ -496,7 +506,8 @@ otx_cpt_enq_single_asym(struct cpt_instance *instance,
goto req_fail;
}
ret = otx_cpt_request_enqueue(instance, pqueue, params.req);
ret = otx_cpt_request_enqueue(instance, pqueue, params.req,
sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
@ -518,7 +529,8 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
{
struct cpt_sess_misc *sess;
struct rte_crypto_sym_op *sym_op = op->sym;
void *prep_req, *mdata = NULL;
struct cpt_request_info *prep_req;
void *mdata = NULL;
int ret = 0;
uint64_t cpt_op;
@ -530,10 +542,10 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
if (likely(cpt_op & CPT_OP_CIPHER_MASK))
ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
&prep_req);
(void **)&prep_req);
else
ret = fill_digest_params(op, sess, &instance->meta_info,
&mdata, &prep_req);
&mdata, (void **)&prep_req);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
@ -542,7 +554,8 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
}
/* Enqueue prepared instruction to h/w */
ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
ret = otx_cpt_request_enqueue(instance, pqueue, prep_req,
sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Buffer allocated for request preparation need to be freed */

View File

@ -356,6 +356,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
{
struct rte_crypto_sym_xform *temp_xform = xform;
struct cpt_sess_misc *misc;
vq_cmd_word3_t vq_cmd_w3;
void *priv;
int ret;
@ -369,7 +370,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
}
memset(priv, 0, sizeof(struct cpt_sess_misc) +
offsetof(struct cpt_ctx, fctx));
offsetof(struct cpt_ctx, mc_ctx));
misc = priv;
@ -407,15 +408,21 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
sizeof(struct cpt_sess_misc);
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
mc_ctx);
/*
* IE engines support IPsec operations
* SE engines support IPsec operations, Chacha-Poly and
* Air-Crypto operations
*/
if (misc->zsk_flag || misc->chacha_poly)
misc->egrp = OTX2_CPT_EGRP_SE;
vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
else
misc->egrp = OTX2_CPT_EGRP_SE_IE;
vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;
misc->cpt_inst_w7 = vq_cmd_w3.u64;
return 0;
@ -428,7 +435,8 @@ priv_put:
static __rte_always_inline void __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
struct cpt_request_info *req,
void *lmtline)
void *lmtline,
uint64_t cpt_inst_w7)
{
union cpt_inst_s inst;
uint64_t lmt_status;
@ -441,7 +449,7 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
inst.s9x.ei0 = req->ist.ei0;
inst.s9x.ei1 = req->ist.ei1;
inst.s9x.ei2 = req->ist.ei2;
inst.s9x.ei3 = req->ist.ei3;
inst.s9x.ei3 = cpt_inst_w7;
inst.s9x.qord = 1;
inst.s9x.grp = qp->ev.queue_id;
@ -470,14 +478,15 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
struct cpt_request_info *req)
struct cpt_request_info *req,
uint64_t cpt_inst_w7)
{
void *lmtline = qp->lmtline;
union cpt_inst_s inst;
uint64_t lmt_status;
if (qp->ca_enable) {
otx2_ca_enqueue_req(qp, req, lmtline);
otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
return 0;
}
@ -492,7 +501,7 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
inst.s9x.ei0 = req->ist.ei0;
inst.s9x.ei1 = req->ist.ei1;
inst.s9x.ei2 = req->ist.ei2;
inst.s9x.ei3 = req->ist.ei3;
inst.s9x.ei3 = cpt_inst_w7;
req->time_out = rte_get_timer_cycles() +
DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
@ -529,7 +538,6 @@ otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
struct rte_crypto_asym_op *asym_op = op->asym;
struct asym_op_params params = {0};
struct cpt_asym_sess_misc *sess;
vq_cmd_word3_t *w3;
uintptr_t *cop;
void *mdata;
int ret;
@ -584,11 +592,7 @@ otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
goto req_fail;
}
/* Set engine group of AE */
w3 = (vq_cmd_word3_t *)&params.req->ist.ei3;
w3->s.grp = OTX2_CPT_EGRP_AE;
ret = otx2_cpt_enqueue_req(qp, pend_q, params.req);
ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
@ -610,7 +614,6 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
struct rte_crypto_sym_op *sym_op = op->sym;
struct cpt_request_info *req;
struct cpt_sess_misc *sess;
vq_cmd_word3_t *w3;
uint64_t cpt_op;
void *mdata;
int ret;
@ -633,10 +636,7 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
return ret;
}
w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
w3->s.grp = sess->egrp;
ret = otx2_cpt_enqueue_req(qp, pend_q, req);
ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Free buffer allocated by fill params routines */
@ -671,7 +671,7 @@ otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
return ret;
}
ret = otx2_cpt_enqueue_req(qp, pend_q, req);
ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
return ret;
}
@ -1266,6 +1266,7 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
struct rte_mempool *pool)
{
struct cpt_asym_sess_misc *priv;
vq_cmd_word3_t vq_cmd_w3;
int ret;
CPT_PMD_INIT_FUNC_TRACE();
@ -1286,7 +1287,12 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
return ret;
}
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
priv->cpt_inst_w7 = vq_cmd_w3.u64;
set_asym_session_private_data(sess, dev->driver_id, priv);
return 0;
}

View File

@ -323,7 +323,7 @@ crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);
lp->ucmd_w3 = inst.u64[7];
lp->cpt_inst_w7 = inst.u64[7];
lp->ucmd_opcode = (lp->ctx_len << 8) |
(OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);
@ -407,7 +407,7 @@ crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);
lp->ucmd_w3 = inst.u64[7];
lp->cpt_inst_w7 = inst.u64[7];
lp->ucmd_opcode = (lp->ctx_len << 8) |
(OTX2_IPSEC_PO_PROCESS_IPSEC_INB);

View File

@ -18,7 +18,7 @@ struct otx2_sec_session_ipsec_lp {
struct otx2_ipsec_po_out_sa out_sa;
};
uint64_t ucmd_w3;
uint64_t cpt_inst_w7;
union {
uint64_t ucmd_w0;
struct {

View File

@ -123,7 +123,6 @@ process_outb_sa(struct rte_crypto_op *cop,
req->ist.ei0 = word0.u64;
req->ist.ei1 = rte_pktmbuf_iova(m_src);
req->ist.ei2 = req->ist.ei1;
req->ist.ei3 = sess->ucmd_w3;
hdr->seq = rte_cpu_to_be_32(sess->seq_lo);
hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);
@ -170,7 +169,6 @@ process_inb_sa(struct rte_crypto_op *cop,
req->ist.ei0 = word0.u64;
req->ist.ei1 = rte_pktmbuf_iova(m_src);
req->ist.ei2 = req->ist.ei1;
req->ist.ei3 = sess->ucmd_w3;
exit:
*prep_req = req;