crypto/dpaa2_sec: support atomic queues
Queues attached to an event crypto device can be parallel, atomic or ordered. This patch adds support for atomic queue processing in dpaa2_sec queues.

Signed-off-by: Ashish Jain <ashish.jain@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
commit a77db24643
parent bffc7d561c
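For readers unfamiliar with the QBMAN atomic-queue handshake used in the diff below: when a frame is dequeued from an atomic queue, its DQRR (dequeue response ring) slot stays held so no other core can pick up the same flow; the driver records the slot by storing dqrr_index + 1 in the mbuf seqn field, and the next enqueue of that crypto op releases the slot via delayed consume acknowledgement (DCA). A minimal sketch of the two halves of that handshake, using simplified per-lcore state and stand-in constants in place of the driver's DPAA2_PER_LCORE_DQRR_* macros and QBMAN flags (names here are illustrative, not the driver's API):

#include <stdint.h>

#define INVALID_SEQN      0            /* stand-in for DPAA2_INVALID_MBUF_SEQN */
#define ENQUEUE_FLAG_DCA  (1u << 31)   /* stand-in for QBMAN_ENQUEUE_FLAG_DCA  */

/* Simplified per-lcore bookkeeping of held DQRR slots; the driver keeps the
 * same state in DPAA2_PER_LCORE_DQRR_SIZE / _HELD / _MBUF. */
struct dqrr_state {
	unsigned int size;  /* number of DQRR entries currently held */
	uint16_t held;      /* bitmask of held DQRR slot indexes     */
};

/* Dequeue half (compare dpaa2_sec_process_atomic_event): remember which DQRR
 * slot the event occupies so the flow stays atomic until it is re-enqueued. */
static void
hold_dqrr_slot(struct dqrr_state *st, uint32_t *mbuf_seqn, uint8_t dqrr_index)
{
	*mbuf_seqn = (uint32_t)dqrr_index + 1;   /* 0 means "nothing held" */
	st->size++;
	st->held |= 1u << dqrr_index;
}

/* Enqueue half (compare dpaa2_sec_enqueue_burst): if the mbuf still holds a
 * DQRR slot, tag the enqueue with DCA so hardware consumes the slot only when
 * the frame is accepted, preserving per-flow order. */
static uint32_t
release_dqrr_slot(struct dqrr_state *st, uint32_t *mbuf_seqn)
{
	uint32_t flags = 0;

	if (*mbuf_seqn != INVALID_SEQN) {
		uint8_t dqrr_index = (uint8_t)(*mbuf_seqn - 1);

		flags = ENQUEUE_FLAG_DCA | dqrr_index;
		st->size--;
		st->held &= ~(1u << dqrr_index);
		*mbuf_seqn = INVALID_SEQN;
	}
	return flags;
}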
@@ -1210,6 +1210,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
 	struct qbman_swp *swp;
 	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
 	/*todo - need to support multiple buffer pools */
 	uint16_t bpid;
 	struct rte_mempool *mb_pool;
@@ -1241,6 +1242,15 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			dpaa2_eqcr_size : nb_ops;

 		for (loop = 0; loop < frames_to_send; loop++) {
+			if ((*ops)->sym->m_src->seqn) {
+				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
+
+				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+				DPAA2_PER_LCORE_DQRR_SIZE--;
+				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+			}
+
 			/*Clear the unused FD fields before sending*/
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			mb_pool = (*ops)->sym->m_src->pool;
@@ -1257,7 +1267,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		while (loop < frames_to_send) {
 			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
 							&fd_arr[loop],
-							NULL,
+							&flags[loop],
 							frames_to_send - loop);
 		}

@@ -1282,6 +1292,9 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

+	diff = len - mbuf->pkt_len;
+	mbuf->pkt_len += diff;
+	mbuf->data_len += diff;
 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
 	op->sym->aead.digest.phys_addr = 0L;
@@ -1292,9 +1305,6 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
 	else
 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
-	diff = len - mbuf->pkt_len;
-	mbuf->pkt_len += diff;
-	mbuf->data_len += diff;

 	return op;
 }
@@ -2880,6 +2890,38 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,

 	qbman_swp_dqrr_consume(swp, dq);
 }
+static void
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+				 const struct qbman_fd *fd,
+				 const struct qbman_result *dq,
+				 struct dpaa2_queue *rxq,
+				 struct rte_event *ev)
+{
+	uint8_t dqrr_index;
+	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+	/* Prefetching mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+
+	ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+				(rxq->dev))->driver_id);
+	dqrr_index = qbman_get_dqrr_idx(dq);
+	crypto_op->sym->m_src->seqn = dqrr_index + 1;
+	DPAA2_PER_LCORE_DQRR_SIZE++;
+	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+}
+
 int
 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
@@ -2895,6 +2937,8 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,

 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
+	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
+		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
 	else
 		return -EINVAL;

@@ -2906,7 +2950,10 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,

 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
 	cfg.user_ctx = (size_t)(qp);
-
+	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
+		cfg.order_preservation_en = 1;
+	}
 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 				  qp_id, &cfg);
 	if (ret) {