crypto/scheduler: change enqueue and dequeue functions

This patch changes the enqueue and dequeue functions of the cryptodev
scheduler PMD. Originally, a two-layer function call was carried out
upon enqueuing or dequeuing a burst of crypto ops. This patch removes
one layer of indirection to improve performance.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
Author:    Fan Zhang <roy.fan.zhang@intel.com>
Date:      2017-03-02 11:12:11 +0000
Committer: Pablo de Lara
Commit:    211e27a9c2
Parent:    884ed3ff8e

3 changed files, 24 insertions(+), 57 deletions(-)
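
Conceptually, the change replaces a two-step dispatch (device burst
pointer -> wrapper -> per-queue-pair function pointer) with a single
one (device burst pointer -> mode-specific function, bound once at
start time). Below is a minimal, self-contained sketch of that
pattern, independent of DPDK; every name in it (toy_dev,
toy_dev_start, enqueue_plain, enqueue_ordering) is invented for
illustration and is not a DPDK symbol.

/*
 * Sketch: bind the mode-specific burst function to the device-level
 * pointer once at start time, so the hot path makes one indirect
 * call per burst instead of two.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t (*burst_fn_t)(void *qp, void **ops, uint16_t nb_ops);

/* Stand-ins for the mode-specific burst functions. */
static uint16_t
enqueue_plain(void *qp, void **ops, uint16_t nb_ops)
{
	(void)qp; (void)ops;
	return nb_ops;	/* pretend every op was enqueued */
}

static uint16_t
enqueue_ordering(void *qp, void **ops, uint16_t nb_ops)
{
	(void)qp; (void)ops;
	return nb_ops;	/* same, but would also tag sequence numbers */
}

struct toy_dev {
	burst_fn_t enqueue_burst;	/* bound once, called per burst */
	int reordering_enabled;
};

/* Mirrors the shape of the new start logic: choose the burst
 * function up front according to the reordering setting. */
static void
toy_dev_start(struct toy_dev *dev)
{
	if (dev->reordering_enabled)
		dev->enqueue_burst = enqueue_ordering;
	else
		dev->enqueue_burst = enqueue_plain;
}

int
main(void)
{
	struct toy_dev dev = { .reordering_enabled = 1 };
	void *ops[4] = { 0 };

	toy_dev_start(&dev);
	printf("%u ops enqueued\n",
			(unsigned)dev.enqueue_burst(NULL, ops, 4));
	return 0;
}

Binding the pointer at start time keeps the per-burst hot path to a
single indirect call, which is what the hunks below implement for
both enqueue and dequeue.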

--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c

@@ -61,32 +61,6 @@ const char *scheduler_valid_params[] = {
 	RTE_CRYPTODEV_VDEV_SOCKET_ID
 };
 
-static uint16_t
-scheduler_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct scheduler_qp_ctx *qp_ctx = queue_pair;
-	uint16_t processed_ops;
-
-	processed_ops = (*qp_ctx->schedule_enqueue)(qp_ctx, ops,
-			nb_ops);
-
-	return processed_ops;
-}
-
-static uint16_t
-scheduler_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct scheduler_qp_ctx *qp_ctx = queue_pair;
-	uint16_t processed_ops;
-
-	processed_ops = (*qp_ctx->schedule_dequeue)(qp_ctx, ops,
-			nb_ops);
-
-	return processed_ops;
-}
-
 static int
 attach_init_slaves(uint8_t scheduler_id,
 		const uint8_t *slaves, const uint8_t nb_slaves)
@@ -146,9 +120,6 @@ cryptodev_scheduler_create(const char *name,
 	dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
 	dev->dev_ops = rte_crypto_scheduler_pmd_ops;
 
-	dev->enqueue_burst = scheduler_enqueue_burst;
-	dev->dequeue_burst = scheduler_dequeue_burst;
-
 	sched_ctx = dev->data->dev_private;
 	sched_ctx->max_nb_queue_pairs =
 			init_params->def_p.max_nb_queue_pairs;

--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h

@@ -98,9 +98,6 @@ struct scheduler_ctx {
 struct scheduler_qp_ctx {
 	void *private_qp_ctx;
 
-	rte_cryptodev_scheduler_burst_enqueue_t schedule_enqueue;
-	rte_cryptodev_scheduler_burst_dequeue_t schedule_dequeue;
-
 	struct rte_reorder_buffer *reorder_buf;
 	uint32_t seqn;
 } __rte_cache_aligned;

--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c

@@ -45,10 +45,10 @@ struct rr_scheduler_qp_ctx {
 };
 
 static uint16_t
-schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
@@ -112,12 +112,11 @@ schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
 }
 
 static uint16_t
-schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			gen_qp_ctx->private_qp_ctx;
+	struct scheduler_qp_ctx *qp_ctx = qp;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
@@ -148,13 +147,13 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
 		sessions[i + 3] = ops[i + 3]->sym->session;
 
 		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
 		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-		ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 1]->sym->m_src->seqn = qp_ctx->seqn++;
 		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-		ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 2]->sym->m_src->seqn = qp_ctx->seqn++;
 		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-		ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 3]->sym->m_src->seqn = qp_ctx->seqn++;
 
 		rte_prefetch0(ops[i + 4]->sym->session);
 		rte_prefetch0(ops[i + 4]->sym->m_src);
@@ -171,7 +170,7 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
 				ops[i]->sym->session->_private;
 		sessions[i] = ops[i]->sym->session;
 		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
 	}
 
 	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
@@ -193,10 +192,10 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
 }
 
 static uint16_t
-schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
 	struct scheduler_slave *slave;
 	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
 	uint16_t nb_deq_ops;
@@ -230,13 +229,13 @@ schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
 }
 
 static uint16_t
-schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
+	struct scheduler_qp_ctx *qp_ctx = (struct scheduler_qp_ctx *)qp;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx = (qp_ctx->private_qp_ctx);
 	struct scheduler_slave *slave;
-	struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
+	struct rte_reorder_buffer *reorder_buff = qp_ctx->reorder_buf;
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
 	uint16_t nb_deq_ops, nb_drained_mbufs;
 	const uint16_t nb_op_ops = nb_ops;
@@ -354,6 +353,14 @@ scheduler_start(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint16_t i;
 
+	if (sched_ctx->reordering_enabled) {
+		dev->enqueue_burst = &schedule_enqueue_ordering;
+		dev->dequeue_burst = &schedule_dequeue_ordering;
+	} else {
+		dev->enqueue_burst = &schedule_enqueue;
+		dev->dequeue_burst = &schedule_dequeue;
+	}
+
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
 		struct rr_scheduler_qp_ctx *rr_qp_ctx =
@@ -372,14 +379,6 @@ scheduler_start(struct rte_cryptodev *dev)
 
 		rr_qp_ctx->last_enq_slave_idx = 0;
 		rr_qp_ctx->last_deq_slave_idx = 0;
-
-		if (sched_ctx->reordering_enabled) {
-			qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
-			qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
-		} else {
-			qp_ctx->schedule_enqueue = &schedule_enqueue;
-			qp_ctx->schedule_dequeue = &schedule_dequeue;
-		}
 	}
 
 	return 0;