crypto/octeontx: add burst enqueue
Signed-off-by: Ankur Dwivedi <ankur.dwivedi@caviumnetworks.com>
Signed-off-by: Anoob Joseph <anoob.joseph@caviumnetworks.com>
Signed-off-by: Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
Signed-off-by: Nithin Dabilpuram <nithin.dabilpuram@caviumnetworks.com>
Signed-off-by: Ragothaman Jayaraman <rjayaraman@caviumnetworks.com>
Signed-off-by: Srisivasubramanian S <ssrinivasan@caviumnetworks.com>
Signed-off-by: Tejasree Kondoj <kondoj.tejasree@caviumnetworks.com>
parent 0e9741aa16
commit ac4d88afc7
@@ -15,6 +15,9 @@
 */
#define CRYPTO_OCTEONTX 0x1

/* Default command timeout in seconds */
#define DEFAULT_COMMAND_TIMEOUT 4

#define CPT_COUNT_THOLD 32
#define CPT_TIMER_THOLD 0x3F

@@ -5,6 +5,9 @@
#ifndef _CPT_REQUEST_MGR_H_
#define _CPT_REQUEST_MGR_H_

#include <rte_branch_prediction.h>
#include <rte_cycles.h>

#include "cpt_common.h"
#include "cpt_mcode_defines.h"

@@ -34,6 +37,45 @@ cpt_get_session_size(void)
	return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
}

static __rte_always_inline int32_t __hot
cpt_enqueue_req(struct cpt_instance *instance, struct pending_queue *pqueue,
		void *req)
{
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;
	int32_t ret = 0;

	if (unlikely(!req))
		return 0;

	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
		return -EAGAIN;

	fill_cpt_inst(instance, req);

	CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);

	/* Fill time_out cycles */
	user_req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
	user_req->extra_time = 0;

	/* Default mode of software queue */
	mark_cpt_inst(instance);

	pqueue->rid_queue[pqueue->enq_tail].rid =
			(uintptr_t)user_req;
	/* We will use soft queue length here to limit
	 * requests
	 */
	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
	pqueue->pending_count += 1;

	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
			 "op: %p", user_req, user_req->op);

	return ret;
}

static __rte_always_inline int __hot
cpt_pmd_crypto_operation(struct cpt_instance *instance,
		struct rte_crypto_op *op, struct pending_queue *pqueue,

@@ -45,7 +87,6 @@ cpt_pmd_crypto_operation(struct cpt_instance *instance,
	int ret = 0;
	uint64_t cpt_op;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	RTE_SET_USED(pqueue);

	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		int sess_len;

@@ -83,6 +124,9 @@ cpt_pmd_crypto_operation(struct cpt_instance *instance,
		goto req_fail;
	}

	/* Enqueue prepared instruction to HW */
	ret = cpt_enqueue_req(instance, pqueue, prep_req);

	if (unlikely(ret)) {
		if (unlikely(ret == -EAGAIN))
			goto req_fail;

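For reference, below is a minimal sketch of the soft-queue bookkeeping that cpt_enqueue_req() relies on. The definitions of struct pending_queue, struct rid, DEFAULT_CMD_QLEN and MOD_INC() shown here are simplified assumptions mirroring how they are used in the hunk above; the real definitions live in the driver's common headers and may differ in detail.

#include <errno.h>
#include <stdint.h>

#define DEFAULT_CMD_QLEN 2048	/* assumed soft queue depth */

/* Assumed wrap-around increment, mirroring how MOD_INC() is used above */
#define MOD_INC(i, l)	((i) = ((i) + 1 == (l)) ? 0 : (i) + 1)

struct rid {
	uintptr_t rid;			/* opaque pointer to an in-flight request */
};

struct pending_queue {
	struct rid rid_queue[DEFAULT_CMD_QLEN];
	uint16_t enq_tail;		/* next free slot */
	uint16_t pending_count;		/* submitted but not yet dequeued */
};

/* Same bookkeeping steps as in cpt_enqueue_req(), without the HW parts */
static inline int
soft_enqueue(struct pending_queue *pq, void *req)
{
	if (pq->pending_count >= DEFAULT_CMD_QLEN)
		return -EAGAIN;		/* queue full, caller retries later */

	pq->rid_queue[pq->enq_tail].rid = (uintptr_t)req;
	MOD_INC(pq->enq_tail, DEFAULT_CMD_QLEN);
	pq->pending_count += 1;
	return 0;
}
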
@@ -187,6 +187,15 @@ otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf)
	return vqx_dbell.s.dbell_cnt;
}

static __rte_always_inline void
otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	/* Memory barrier to flush pending writes */
	rte_smp_wmb();
	otx_cpt_write_vq_doorbell(cptvf, count);
}

static __rte_always_inline void *
get_cpt_inst(struct command_queue *cqueue)
{

@@ -237,7 +246,16 @@ mark_cpt_inst(struct cpt_instance *instance)
		queue->idx = 0;
		queue->cchunk = cchunk;
	}
}

static __rte_always_inline uint8_t
check_nb_command_id(struct cpt_request_info *user_req,
		    struct cpt_instance *instance)
{
	/* Required for dequeue operation. Adding a dummy routine for now */
	RTE_SET_USED(user_req);
	RTE_SET_USED(instance);
	return 0;
}

#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */

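otx_cpt_ring_dbell() is what lets the burst path below notify hardware once per burst instead of once per operation: commands are written into the in-memory command queue first, the write barrier makes them visible, and a single doorbell write then publishes the count. A simplified, self-contained sketch of that pattern follows; hw_notify() is a hypothetical stand-in for the real otx_cpt_write_vq_doorbell() register write.

#include <stdint.h>
#include <rte_atomic.h>

static void
hw_notify(uint16_t count)
{
	/* hypothetical stand-in for the MMIO doorbell write */
	(void)count;
}

static void
submit_commands(void * const *cmds, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++) {
		/* copy cmds[i] into the in-memory command queue (omitted) */
		(void)cmds;
	}

	rte_smp_wmb();	/* command writes must be visible before the count */
	hw_notify(n);	/* one doorbell write covers the whole burst */
}
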
@@ -341,6 +341,31 @@ otx_cpt_session_clear(struct rte_cryptodev *dev,
	}
}

static uint16_t
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	uint16_t count = 0;
	int ret;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;

	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
	if (nb_ops > count)
		nb_ops = count;

	count = 0;
	while (likely(count < nb_ops)) {
		ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
					       otx_cryptodev_driver_id);
		if (unlikely(ret))
			break;
		count++;
	}
	otx_cpt_ring_dbell(instance, count);
	return count;
}

static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,

@@ -432,7 +457,7 @@ otx_cpt_dev_create(struct rte_cryptodev *c_dev)

	c_dev->dev_ops = &cptvf_ops;

	c_dev->enqueue_burst = NULL;
	c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
	c_dev->dequeue_burst = NULL;

	c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
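
Applications reach otx_cpt_pkt_enqueue() through the generic cryptodev burst API rather than by calling it directly. Below is a minimal caller-side sketch; dev_id, qp_id and the populated ops[] array are assumed to have been set up by the usual device/queue-pair configuration and rte_crypto_op pool code, which is omitted here.

#include <rte_cryptodev.h>

/* Submit a burst of prepared crypto ops; ops that do not fit in the
 * PMD's soft pending queue this time are retried on a later call.
 */
static uint16_t
submit_crypto_burst(uint8_t dev_id, uint16_t qp_id,
		    struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t sent = 0;

	while (sent < nb_ops) {
		/* Dispatches to the PMD enqueue handler registered above,
		 * i.e. otx_cpt_pkt_enqueue() for this device.
		 */
		uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id,
							 &ops[sent],
							 nb_ops - sent);
		if (n == 0)
			break;	/* queue full for now; caller retries later */
		sent += n;
	}
	return sent;
}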