event/octeontx2: support crypto adapter forward mode

Advertise the crypto adapter forward-mode capability and set the crypto
adapter enqueue function in the driver.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
This commit is contained in:
Shijith Thotton 2021-04-15 14:43:50 +05:30 committed by Jerin Jacob
parent f96a8ebb27
commit 4a67a0573e
7 changed files with 129 additions and 21 deletions

View File

@ -7,6 +7,7 @@
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_event_crypto_adapter.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
@ -438,15 +439,35 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
return -ENOTSUP;
}
static __rte_always_inline void __rte_hot
static __rte_always_inline int32_t __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
struct cpt_request_info *req,
void *lmtline,
struct rte_crypto_op *op,
uint64_t cpt_inst_w7)
{
union rte_event_crypto_metadata *m_data;
union cpt_inst_s inst;
uint64_t lmt_status;
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
m_data = rte_cryptodev_sym_session_get_user_data(
op->sym->session);
if (m_data == NULL) {
rte_pktmbuf_free(op->sym->m_src);
rte_crypto_op_free(op);
rte_errno = EINVAL;
return -EINVAL;
}
} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
op->private_data_offset) {
m_data = (union rte_event_crypto_metadata *)
((uint8_t *)op +
op->private_data_offset);
} else {
return -EINVAL;
}
inst.u[0] = 0;
inst.s9x.res_addr = req->comp_baddr;
inst.u[2] = 0;
@ -457,12 +478,11 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
inst.s9x.ei2 = req->ist.ei2;
inst.s9x.ei3 = cpt_inst_w7;
inst.s9x.qord = 1;
inst.s9x.grp = qp->ev.queue_id;
inst.s9x.tt = qp->ev.sched_type;
inst.s9x.tag = (RTE_EVENT_TYPE_CRYPTODEV << 28) |
qp->ev.flow_id;
inst.s9x.wq_ptr = (uint64_t)req >> 3;
inst.u[2] = (((RTE_EVENT_TYPE_CRYPTODEV << 28) |
m_data->response_info.flow_id) |
((uint64_t)m_data->response_info.sched_type << 32) |
((uint64_t)m_data->response_info.queue_id << 34));
inst.u[3] = 1 | (((uint64_t)req >> 3) << 3);
req->qp = qp;
do {
@ -479,22 +499,22 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
} while (lmt_status == 0);
return 0;
}
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
struct cpt_request_info *req,
struct rte_crypto_op *op,
uint64_t cpt_inst_w7)
{
void *lmtline = qp->lmtline;
union cpt_inst_s inst;
uint64_t lmt_status;
if (qp->ca_enable) {
otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
return 0;
}
if (qp->ca_enable)
return otx2_ca_enqueue_req(qp, req, lmtline, op, cpt_inst_w7);
if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
return -EAGAIN;
@ -598,7 +618,8 @@ otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
goto req_fail;
}
ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);
ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, op,
sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
@ -642,7 +663,7 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
return ret;
}
ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Free buffer allocated by fill params routines */
@ -711,7 +732,7 @@ otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
return ret;
}
ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7);
if (winsz && esn) {
seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;

View File

@ -12,8 +12,9 @@
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>
#include "otx2_evdev_stats.h"
#include "otx2_evdev.h"
#include "otx2_evdev_crypto_adptr_tx.h"
#include "otx2_evdev_stats.h"
#include "otx2_irq.h"
#include "otx2_tim_evdev.h"
@ -311,6 +312,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}
event_dev->ca_enqueue = otx2_ssogws_ca_enq;
if (dev->dual_ws) {
event_dev->enqueue = otx2_ssogws_dual_enq;
@ -473,6 +475,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
[!!(dev->tx_offloads &
NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}
event_dev->ca_enqueue = otx2_ssogws_dual_ca_enq;
}
event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;

View File

@ -18,7 +18,8 @@ otx2_ca_caps_get(const struct rte_eventdev *dev,
RTE_SET_USED(cdev);
*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND |
RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW;
RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW |
RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
return 0;
}

View File

@ -2,8 +2,8 @@
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_
#define _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_
#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_
#define _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
@ -72,4 +72,4 @@ otx2_handle_crypto_event(uint64_t get_work1)
return (uint64_t)(cop);
}
#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_ */
#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_ */

View File

@ -0,0 +1,83 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2021 Marvell International Ltd.
*/
#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_
#define _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>
#include <otx2_cryptodev_qp.h>
#include <otx2_worker.h>
static inline uint16_t
otx2_ca_enq(uintptr_t tag_op, const struct rte_event *ev)
{
union rte_event_crypto_metadata *m_data;
struct rte_crypto_op *crypto_op;
struct rte_cryptodev *cdev;
struct otx2_cpt_qp *qp;
uint8_t cdev_id;
uint16_t qp_id;
crypto_op = ev->event_ptr;
if (crypto_op == NULL)
return 0;
if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
m_data = rte_cryptodev_sym_session_get_user_data(
crypto_op->sym->session);
if (m_data == NULL)
goto free_op;
cdev_id = m_data->request_info.cdev_id;
qp_id = m_data->request_info.queue_pair_id;
} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
crypto_op->private_data_offset) {
m_data = (union rte_event_crypto_metadata *)
((uint8_t *)crypto_op +
crypto_op->private_data_offset);
cdev_id = m_data->request_info.cdev_id;
qp_id = m_data->request_info.queue_pair_id;
} else {
goto free_op;
}
cdev = &rte_cryptodevs[cdev_id];
qp = cdev->data->queue_pairs[qp_id];
if (!ev->sched_type)
otx2_ssogws_head_wait(tag_op);
if (qp->ca_enable)
return cdev->enqueue_burst(qp, &crypto_op, 1);
free_op:
rte_pktmbuf_free(crypto_op->sym->m_src);
rte_crypto_op_free(crypto_op);
rte_errno = EINVAL;
return 0;
}
static uint16_t __rte_hot
otx2_ssogws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
{
struct otx2_ssogws *ws = port;
RTE_SET_USED(nb_events);
return otx2_ca_enq(ws->tag_op, ev);
}
static uint16_t __rte_hot
otx2_ssogws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
{
struct otx2_ssogws_dual *ws = port;
RTE_SET_USED(nb_events);
return otx2_ca_enq(ws->ws_state[!ws->vws].tag_op, ev);
}
#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_ */

View File

@ -10,7 +10,7 @@
#include <otx2_common.h>
#include "otx2_evdev.h"
#include "otx2_evdev_crypto_adptr_dp.h"
#include "otx2_evdev_crypto_adptr_rx.h"
#include "otx2_ethdev_sec_tx.h"
/* SSO Operations */

View File

@ -10,7 +10,7 @@
#include <otx2_common.h>
#include "otx2_evdev.h"
#include "otx2_evdev_crypto_adptr_dp.h"
#include "otx2_evdev_crypto_adptr_rx.h"
/* SSO Operations */
static __rte_always_inline uint16_t