crypto/cnxk: add event metadata set operation

Added cryptodev operation for setting event crypto
metadata for all supported sessions - sym/asym/security.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
Signed-off-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
Acked-by: Anoob Joseph <anoobj@marvell.com>
This commit is contained in:
Volodymyr Fialko 2022-05-12 18:15:22 +05:30 committed by Akhil Goyal
parent a7ddfa9c27
commit 97ebfda829
7 changed files with 255 additions and 53 deletions

View File

@ -264,30 +264,136 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count + i;
}
/*
 * Cryptodev op to set event crypto adapter metadata on a session.
 *
 * Resolves the destination queue pair and precomputes CPT instruction
 * word 2 (event response info) from @mdata, then caches both in the
 * session private data so the adapter enqueue fast path does not have
 * to re-derive them per op.
 *
 * @param dev       Unused; required by the rte_cryptodev op signature.
 * @param sess      Session object (type depends on @sess_type).
 * @param op_type   RTE_CRYPTO_OP_TYPE_SYMMETRIC or _ASYMMETRIC.
 * @param sess_type Security / with-session; other combinations rejected.
 * @param mdata     Pointer to union rte_event_crypto_metadata.
 * @return 0 on success, -EINVAL on unsupported op/session type combo.
 */
static int
cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
				      void *sess,
				      enum rte_crypto_op_type op_type,
				      enum rte_crypto_op_sess_type sess_type,
				      void *mdata)
{
	union rte_event_crypto_metadata *ec_mdata = mdata;
	struct rte_event *rsp_info;
	struct cnxk_cpt_qp *qp;
	uint8_t cdev_id;
	int16_t qp_id;
	uint64_t w2;

	/* Get queue pair */
	cdev_id = ec_mdata->request_info.cdev_id;
	qp_id = ec_mdata->request_info.queue_pair_id;
	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];

	/* Prepare w2 */
	rsp_info = &ec_mdata->response_info;
	w2 = CNXK_CPT_INST_W2(
		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
		rsp_info->sched_type, rsp_info->queue_id, 0);

	/* Set meta according to session type */
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct cn10k_sec_session *priv;
			struct cn10k_ipsec_sa *sa;

			priv = get_sec_session_private_data(sess);
			sa = &priv->sa;
			sa->qp = qp;
			sa->inst.w2 = w2;
		} else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct cnxk_se_sess *priv;

			priv = get_sym_session_private_data(
				sess, cn10k_cryptodev_driver_id);
			priv->qp = qp;
			priv->cpt_inst_w2 = w2;
		} else
			return -EINVAL;
	} else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_cryptodev_asym_session *asym_sess = sess;
			struct cnxk_ae_sess *priv;

			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
			priv->qp = qp;
			priv->cpt_inst_w2 = w2;
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	return 0;
}
static inline int
cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
struct cnxk_cpt_qp **qp, uint64_t *w2)
{
if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
struct cn10k_sec_session *priv;
struct cn10k_ipsec_sa *sa;
priv = get_sec_session_private_data(op->sym->sec_session);
sa = &priv->sa;
*qp = sa->qp;
*w2 = sa->inst.w2;
} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
struct cnxk_se_sess *priv;
priv = get_sym_session_private_data(
op->sym->session, cn10k_cryptodev_driver_id);
*qp = priv->qp;
*w2 = priv->cpt_inst_w2;
} else {
union rte_event_crypto_metadata *ec_mdata;
struct rte_event *rsp_info;
uint8_t cdev_id;
uint16_t qp_id;
ec_mdata = (union rte_event_crypto_metadata *)
((uint8_t *)op + op->private_data_offset);
if (!ec_mdata)
return -EINVAL;
rsp_info = &ec_mdata->response_info;
cdev_id = ec_mdata->request_info.cdev_id;
qp_id = ec_mdata->request_info.queue_pair_id;
*qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
*w2 = CNXK_CPT_INST_W2(
(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
rsp_info->sched_type, rsp_info->queue_id, 0);
}
} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
struct rte_cryptodev_asym_session *asym_sess;
struct cnxk_ae_sess *priv;
asym_sess = op->asym->session;
priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
*qp = priv->qp;
*w2 = priv->cpt_inst_w2;
} else
return -EINVAL;
} else
return -EINVAL;
return 0;
}
uint16_t
cn10k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
{
struct cpt_inflight_req *infl_req;
uint64_t lmt_base, lmt_arg, w2;
struct cpt_inst_s *inst;
struct cnxk_cpt_qp *qp;
uint16_t lmt_id;
int ret;
ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
if (unlikely(ret)) {
rte_errno = EINVAL;
return 0;
}
if (unlikely(!qp->ca.enabled)) {
rte_errno = EINVAL;
@ -316,9 +422,7 @@ cn10k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
infl_req->qp = qp;
inst->w0.u64 = 0;
inst->res_addr = (uint64_t)&infl_req->res;
inst->w2.u64 = CNXK_CPT_INST_W2(
(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
rsp_info->sched_type, rsp_info->queue_id, 0);
inst->w2.u64 = w2;
inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
if (roc_cpt_is_iq_full(&qp->lf)) {
@ -327,7 +431,7 @@ cn10k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
return 0;
}
if (!rsp_info->sched_type)
if (inst->w2.s.tt == RTE_SCHED_TYPE_ORDERED)
roc_sso_hws_head_wait(base);
lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
@ -592,4 +696,6 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
.asym_session_configure = cnxk_ae_session_cfg,
.asym_session_clear = cnxk_ae_session_clear,
/* Event crypto ops */
.session_ev_mdata_set = cn10k_cpt_crypto_adapter_ev_mdata_set,
};

View File

@ -20,6 +20,8 @@ struct cn10k_ipsec_sa {
uint16_t iv_offset;
uint8_t iv_length;
bool is_outbound;
/** Queue pair */
struct cnxk_cpt_qp *qp;
/**
* End of SW mutable area

View File

@ -316,28 +316,134 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count;
}
/*
 * Cryptodev op to set event crypto adapter metadata on a session (CN9K).
 *
 * Resolves the destination queue pair and precomputes CPT instruction
 * word 2 (event response info) from @mdata, then caches both in the
 * session private data so the adapter enqueue fast path does not have
 * to re-derive them per op.
 *
 * @param dev       Unused; required by the rte_cryptodev op signature.
 * @param sess      Session object (type depends on @sess_type).
 * @param op_type   RTE_CRYPTO_OP_TYPE_SYMMETRIC or _ASYMMETRIC.
 * @param sess_type Security / with-session; other combinations rejected.
 * @param mdata     Pointer to union rte_event_crypto_metadata.
 * @return 0 on success, -EINVAL on unsupported op/session type combo.
 */
static int
cn9k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
				     void *sess,
				     enum rte_crypto_op_type op_type,
				     enum rte_crypto_op_sess_type sess_type,
				     void *mdata)
{
	union rte_event_crypto_metadata *ec_mdata = mdata;
	struct rte_event *rsp_info;
	struct cnxk_cpt_qp *qp;
	uint8_t cdev_id;
	uint16_t qp_id;
	uint64_t w2;

	/* Get queue pair */
	cdev_id = ec_mdata->request_info.cdev_id;
	qp_id = ec_mdata->request_info.queue_pair_id;
	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];

	/* Prepare w2 */
	rsp_info = &ec_mdata->response_info;
	w2 = CNXK_CPT_INST_W2(
		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
		rsp_info->sched_type, rsp_info->queue_id, 0);

	/* Set meta according to session type */
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct cn9k_sec_session *priv;
			struct cn9k_ipsec_sa *sa;

			priv = get_sec_session_private_data(sess);
			sa = &priv->sa;
			sa->qp = qp;
			sa->inst.w2 = w2;
		} else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct cnxk_se_sess *priv;

			priv = get_sym_session_private_data(
				sess, cn9k_cryptodev_driver_id);
			priv->qp = qp;
			priv->cpt_inst_w2 = w2;
		} else
			return -EINVAL;
	} else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_cryptodev_asym_session *asym_sess = sess;
			struct cnxk_ae_sess *priv;

			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
			priv->qp = qp;
			priv->cpt_inst_w2 = w2;
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	return 0;
}
static inline int
cn9k_ca_meta_info_extract(struct rte_crypto_op *op,
struct cnxk_cpt_qp **qp, struct cpt_inst_s *inst)
{
if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
struct cn9k_sec_session *priv;
struct cn9k_ipsec_sa *sa;
priv = get_sec_session_private_data(op->sym->sec_session);
sa = &priv->sa;
*qp = sa->qp;
inst->w2.u64 = sa->inst.w2;
} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
struct cnxk_se_sess *priv;
priv = get_sym_session_private_data(
op->sym->session, cn9k_cryptodev_driver_id);
*qp = priv->qp;
inst->w2.u64 = priv->cpt_inst_w2;
} else {
union rte_event_crypto_metadata *ec_mdata;
struct rte_event *rsp_info;
uint8_t cdev_id;
uint16_t qp_id;
ec_mdata = (union rte_event_crypto_metadata *)
((uint8_t *)op + op->private_data_offset);
if (!ec_mdata)
return -EINVAL;
rsp_info = &ec_mdata->response_info;
cdev_id = ec_mdata->request_info.cdev_id;
qp_id = ec_mdata->request_info.queue_pair_id;
*qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
inst->w2.u64 = CNXK_CPT_INST_W2(
(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
rsp_info->sched_type, rsp_info->queue_id, 0);
}
} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
struct rte_cryptodev_asym_session *asym_sess;
struct cnxk_ae_sess *priv;
asym_sess = op->asym->session;
priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
*qp = priv->qp;
inst->w2.u64 = priv->cpt_inst_w2;
} else
return -EINVAL;
} else
return -EINVAL;
return 0;
}
uint16_t
cn9k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
{
struct cpt_inflight_req *infl_req;
struct cnxk_cpt_qp *qp;
struct cpt_inst_s inst;
int ret;
ret = cn9k_ca_meta_info_extract(op, &qp, &inst);
if (unlikely(ret)) {
rte_errno = EINVAL;
return 0;
}
if (unlikely(!qp->ca.enabled)) {
rte_errno = EINVAL;
@ -362,9 +468,6 @@ cn9k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
infl_req->qp = qp;
inst.w0.u64 = 0;
inst.res_addr = (uint64_t)&infl_req->res;
inst.w2.u64 = CNXK_CPT_INST_W2(
(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
rsp_info->sched_type, rsp_info->queue_id, 0);
inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
if (roc_cpt_is_iq_full(&qp->lf)) {
@ -373,7 +476,7 @@ cn9k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
return 0;
}
if (!rsp_info->sched_type)
if (inst.w2.s.tt == RTE_SCHED_TYPE_ORDERED)
roc_sso_hws_head_wait(base);
cn9k_cpt_inst_submit(&inst, qp->lmtline.lmt_base, qp->lmtline.io_addr);
@ -613,4 +716,7 @@ struct rte_cryptodev_ops cn9k_cpt_ops = {
.asym_session_configure = cnxk_ae_session_cfg,
.asym_session_clear = cnxk_ae_session_clear,
/* Event crypto ops */
.session_ev_mdata_set = cn9k_cpt_crypto_adapter_ev_mdata_set,
};

View File

@ -42,6 +42,8 @@ struct cn9k_ipsec_sa {
struct cnxk_on_ipsec_ar ar;
/** Anti replay window size */
uint32_t replay_win_sz;
/** Queue pair */
struct cnxk_cpt_qp *qp;
};
struct cn9k_sec_session {

View File

@ -22,6 +22,8 @@ struct cnxk_ae_sess {
uint64_t *cnxk_fpm_iova;
struct roc_ae_ec_group **ec_grp;
uint64_t cpt_inst_w7;
uint64_t cpt_inst_w2;
struct cnxk_cpt_qp *qp;
};
static __rte_always_inline void

View File

@ -125,24 +125,6 @@ int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
struct rte_cryptodev_asym_session *sess);
void cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp);
/*
 * Fetch the event crypto adapter metadata attached to an op: from the
 * session user data for session-based symmetric ops, or from the op's
 * private data area for sessionless ops that set private_data_offset.
 * Returns NULL when no metadata is available.
 */
static inline union rte_event_crypto_metadata *
cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		return rte_cryptodev_sym_session_get_user_data(
			op->sym->session);

	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
	    op->private_data_offset)
		return (union rte_event_crypto_metadata *)
		       ((uint8_t *)op + op->private_data_offset);

	return NULL;
}
static __rte_always_inline void
pending_queue_advance(uint64_t *index, const uint64_t mask)
{

View File

@ -33,6 +33,8 @@ struct cnxk_se_sess {
uint16_t auth_iv_offset;
uint32_t salt;
uint64_t cpt_inst_w7;
uint64_t cpt_inst_w2;
struct cnxk_cpt_qp *qp;
struct roc_se_ctx roc_se_ctx;
} __rte_cache_aligned;