common/cnxk: support 103XX CPT

Added support for the 103XX CPT variant. CPT blocks newer than 106XX use the
scatter-gather (SG) version 2 instruction format, so this adds the CPT_INST_S
word 5/6 definitions and the SG ver2 list component, and selects the SG ver2
enqueue paths based on the CPT revision.

Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Author: Tejasree Kondoj, 2022-10-19 19:45:04 +05:30; committed by Akhil Goyal
parent cb6bfc96c3
commit 7c19abdd0c
9 changed files with 868 additions and 753 deletions


@ -157,6 +157,22 @@ union cpt_inst_w4 {
} s;
};
union cpt_inst_w5 {
uint64_t u64;
struct {
uint64_t dptr : 60;
uint64_t gather_sz : 4;
} s;
};
union cpt_inst_w6 {
uint64_t u64;
struct {
uint64_t rptr : 60;
uint64_t scatter_sz : 4;
} s;
};
union cpt_inst_w7 {
uint64_t u64;
struct {
@ -200,9 +216,15 @@ struct cpt_inst_s {
union cpt_inst_w4 w4;
uint64_t dptr;
union {
union cpt_inst_w5 w5;
uint64_t dptr;
};
uint64_t rptr;
union {
union cpt_inst_w6 w6;
uint64_t rptr;
};
union cpt_inst_w7 w7;
};
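The anonymous unions above keep the layout backward compatible: SG ver1 / direct-mode code keeps writing the full 64-bit dptr/rptr, while SG ver2 code programs words 5/6 with a component-list address plus a 4-bit component count. A minimal sketch of filling the SG ver2 words, assuming the list IOVAs fit in the 60-bit fields and that gather_sz/scatter_sz count list components (helper name is illustrative, not from the patch):

/* Illustrative helper, not part of the patch. */
static inline void
cpt_inst_sg2_ptrs_fill(struct cpt_inst_s *inst, uint64_t glist_iova, uint8_t gather_sz,
		       uint64_t slist_iova, uint8_t scatter_sz)
{
	inst->w5.u64 = 0;
	inst->w5.s.dptr = glist_iova;       /* gather component list address */
	inst->w5.s.gather_sz = gather_sz;   /* assumed: number of gather components */

	inst->w6.u64 = 0;
	inst->w6.s.rptr = slist_iova;       /* scatter component list address */
	inst->w6.s.scatter_sz = scatter_sz; /* assumed: number of scatter components */
}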


@ -183,6 +183,17 @@ struct roc_se_sglist_comp {
uint64_t ptr[4];
};
struct roc_se_sg2list_comp {
union {
uint64_t len;
struct {
uint16_t len[3];
uint16_t valid_segs;
} s;
} u;
uint64_t ptr[3];
};
struct roc_se_enc_context {
uint64_t iv_source : 1;
uint64_t aes_key : 2;
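The SG ver2 list component added above carries up to three segments per entry (three 16-bit lengths plus a valid_segs count packed into one 64-bit word, followed by three pointers), whereas the SG ver1 component above it carries four. A minimal sketch of filling one ver2 component, purely illustrative and not taken from the patch:

/* Illustrative only; names are hypothetical. Packs up to three buffer
 * segments into a single SG ver2 component and returns how many it took. */
static inline uint16_t
sg2_comp_fill(struct roc_se_sg2list_comp *comp, const uint64_t iova[],
	      const uint16_t len[], uint16_t nb_segs)
{
	uint16_t i;

	comp->u.len = 0; /* clears the three lengths and valid_segs at once */
	for (i = 0; i < nb_segs && i < 3; i++) {
		comp->u.s.len[i] = len[i];
		comp->ptr[i] = iova[i];
	}
	comp->u.s.valid_segs = i;

	return i;
}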


@ -99,7 +99,7 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
dev->driver_id = cn10k_cryptodev_driver_id;
dev->feature_flags = cnxk_cpt_default_ff_get();
cn10k_cpt_set_enqdeq_fns(dev);
cn10k_cpt_set_enqdeq_fns(dev, vf);
cn10k_sec_ops_override();
rte_cryptodev_pmd_probing_finish(dev);


@ -29,6 +29,7 @@ struct ops_burst {
struct cn10k_sso_hws *ws;
struct cnxk_cpt_qp *qp;
uint16_t nb_ops;
bool is_sg_ver2;
};
/* Holds information required to send vector of operations */
@ -93,8 +94,8 @@ cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
}
static inline int
cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[], struct cpt_inst_s inst[],
struct cpt_inflight_req *infl_req, const bool is_sg_ver2)
{
struct cn10k_sec_session *sec_sess;
struct rte_crypto_asym_op *asym_op;
@ -126,8 +127,7 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
w7 = sec_sess->inst.w7;
} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
sess = CRYPTODEV_GET_SYM_SESS_PRIV(sym_op->session);
ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
&inst[0]);
ret = cpt_sym_inst_fill(qp, op, sess, infl_req, &inst[0], is_sg_ver2);
if (unlikely(ret))
return 0;
w7 = sess->cpt_inst_w7;
@ -138,8 +138,7 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
return 0;
}
ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
&inst[0]);
ret = cpt_sym_inst_fill(qp, op, sess, infl_req, &inst[0], is_sg_ver2);
if (unlikely(ret)) {
sym_session_clear(op->sym->session);
rte_mempool_put(qp->sess_mp, op->sym->session);
@ -177,7 +176,8 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
}
static uint16_t
cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const bool is_sg_ver2)
{
uint64_t lmt_base, lmt_arg, io_addr;
struct cpt_inflight_req *infl_req;
@ -222,7 +222,7 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
infl_req = &pend_q->req_queue[head];
infl_req->op_flags = 0;
ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);
ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req, is_sg_ver2);
if (unlikely(ret != 1)) {
plt_dp_err("Could not process op: %p", ops + i);
if (i == 0)
@ -266,12 +266,22 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count + i;
}
static uint16_t
cn10k_cpt_sg_ver1_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
return cn10k_cpt_enqueue_burst(qptr, ops, nb_ops, false);
}
static uint16_t
cn10k_cpt_sg_ver2_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
return cn10k_cpt_enqueue_burst(qptr, ops, nb_ops, true);
}
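The two thin wrappers above use the usual const-propagation trick: cn10k_cpt_enqueue_burst() takes is_sg_ver2 as a const bool, so each wrapper instantiates a burst function in which the compiler folds the SG-version checks away instead of branching per operation. A tiny self-contained illustration of the pattern (not driver code):

/* Illustration of the specialization pattern only. */
static inline int
do_op(int x, const bool fast_path)
{
	if (fast_path)
		return x << 1;  /* branch resolved at compile time in the callers below */
	return x + 1;
}

static int do_op_slow(int x) { return do_op(x, false); }
static int do_op_fast(int x) { return do_op(x, true); }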
static int
cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
void *sess,
cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused, void *sess,
enum rte_crypto_op_type op_type,
enum rte_crypto_op_sess_type sess_type,
void *mdata)
enum rte_crypto_op_sess_type sess_type, void *mdata)
{
union rte_event_crypto_metadata *ec_mdata = mdata;
struct rte_event *rsp_info;
@ -324,8 +334,7 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
}
static inline int
cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
struct cnxk_cpt_qp **qp, uint64_t *w2)
cn10k_ca_meta_info_extract(struct rte_crypto_op *op, struct cnxk_cpt_qp **qp, uint64_t *w2)
{
if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
@ -514,7 +523,7 @@ ca_lmtst_vec_submit(struct ops_burst *burst, struct vec_request vec_tbl[], uint1
infl_req = infl_reqs[i];
infl_req->op_flags = 0;
ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req, burst->is_sg_ver2);
if (unlikely(ret != 1)) {
plt_cpt_dbg("Could not process op: %p", burst->op[i]);
if (i != 0)
@ -633,7 +642,7 @@ ca_lmtst_burst_submit(struct ops_burst *burst)
infl_req = infl_reqs[i];
infl_req->op_flags = 0;
ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req, burst->is_sg_ver2);
if (unlikely(ret != 1)) {
plt_dp_dbg("Could not process op: %p", burst->op[i]);
if (i != 0)
@ -686,8 +695,9 @@ ca_lmtst_burst_submit(struct ops_burst *burst)
return i;
}
uint16_t __rte_hot
cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
static inline uint16_t __rte_hot
cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events,
const bool is_sg_ver2)
{
uint16_t submitted, count = 0, vec_tbl_len = 0;
struct vec_request vec_tbl[nb_events];
@ -701,6 +711,7 @@ cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_ev
burst.ws = ws;
burst.qp = NULL;
burst.nb_ops = 0;
burst.is_sg_ver2 = is_sg_ver2;
for (i = 0; i < nb_events; i++) {
op = ev[i].event_ptr;
@ -762,6 +773,18 @@ cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_ev
return count;
}
uint16_t __rte_hot
cn10k_cpt_sg_ver1_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
{
return cn10k_cpt_crypto_adapter_enqueue(ws, ev, nb_events, false);
}
uint16_t __rte_hot
cn10k_cpt_sg_ver2_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
{
return cn10k_cpt_crypto_adapter_enqueue(ws, ev, nb_events, true);
}
static inline void
cn10k_cpt_sec_post_process(struct rte_crypto_op *cop, struct cpt_cn10k_res_s *res)
{
@ -1012,9 +1035,13 @@ cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
}
void
cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev, struct cnxk_cpt_vf *vf)
{
dev->enqueue_burst = cn10k_cpt_enqueue_burst;
if (vf->cpt.cpt_revision > ROC_CPT_REVISION_ID_106XX)
dev->enqueue_burst = cn10k_cpt_sg_ver2_enqueue_burst;
else
dev->enqueue_burst = cn10k_cpt_sg_ver1_enqueue_burst;
dev->dequeue_burst = cn10k_cpt_dequeue_burst;
rte_mb();
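Because the SG version is bound to dev->enqueue_burst here at setup time, applications keep using the generic API unchanged and the revision check never runs on the datapath. A minimal caller-side sketch, assuming an already configured cn10k crypto device (dev_id/qp_id are placeholders):

#include <rte_cryptodev.h>

/* The PMD installed either the SG ver1 or SG ver2 burst function above,
 * so this call transparently ends up in the matching variant. */
static uint16_t
submit_crypto_ops(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
}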


@ -9,12 +9,17 @@
#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include "cnxk_cryptodev.h"
extern struct rte_cryptodev_ops cn10k_cpt_ops;
void cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
void cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev, struct cnxk_cpt_vf *vf);
__rte_internal
uint16_t __rte_hot cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[],
uint16_t __rte_hot cn10k_cpt_sg_ver1_crypto_adapter_enqueue(void *ws, struct rte_event ev[],
uint16_t nb_events);
__rte_internal
uint16_t __rte_hot cn10k_cpt_sg_ver2_crypto_adapter_enqueue(void *ws, struct rte_event ev[],
uint16_t nb_events);
__rte_internal
uintptr_t cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);


@ -91,7 +91,7 @@ cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
sym_op = op->sym;
sess = CRYPTODEV_GET_SYM_SESS_PRIV(sym_op->session);
ret = cpt_sym_inst_fill(qp, op, sess, infl_req, inst);
ret = cpt_sym_inst_fill(qp, op, sess, infl_req, inst, false);
inst->w7.u64 = sess->cpt_inst_w7;
} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
ret = cn9k_cpt_sec_inst_fill(op, infl_req, inst);
@ -102,7 +102,7 @@ cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
return -1;
}
ret = cpt_sym_inst_fill(qp, op, sess, infl_req, inst);
ret = cpt_sym_inst_fill(qp, op, sess, infl_req, inst, false);
if (unlikely(ret)) {
sym_session_clear(op->sym->session);
rte_mempool_put(qp->sess_mp, op->sym->session);

File diff suppressed because it is too large.


@ -3,7 +3,8 @@ INTERNAL {
cn9k_cpt_crypto_adapter_enqueue;
cn9k_cpt_crypto_adapter_dequeue;
cn10k_cpt_crypto_adapter_enqueue;
cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
cn10k_cpt_sg_ver2_crypto_adapter_enqueue;
cn10k_cpt_crypto_adapter_dequeue;
cn10k_cpt_crypto_adapter_vector_dequeue;


@ -292,6 +292,7 @@ static void
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
struct roc_cpt *cpt = roc_idev_cpt_get();
const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
NIX_RX_FASTPATH_MODES
@ -594,14 +595,16 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
}
}
}
event_dev->ca_enqueue = cn10k_cpt_crypto_adapter_enqueue;
if ((cpt != NULL) && (cpt->cpt_revision > ROC_CPT_REVISION_ID_106XX))
event_dev->ca_enqueue = cn10k_cpt_sg_ver2_crypto_adapter_enqueue;
else
event_dev->ca_enqueue = cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
sso_hws_tx_adptr_enq_seg);
CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg);
else
CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
sso_hws_tx_adptr_enq);
CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq);
event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
}