crypto/aesni_gcm: do crypto op in dequeue function

There is a bug when more crypto ops are enqueued than dequeued:
the return value is not checked when enqueuing the processed
crypto op into the internal ring, so when the ring is full,
crypto ops and mbufs are leaked. The issue is more obvious when
different cores do the enqueue and the dequeue.
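
For illustration, the old completion handler ended with an unchecked
rte_ring_enqueue(qp->processed_pkts, (void *)op). rte_ring_enqueue()
returns 0 on success and -ENOBUFS when the ring is full, so on a full
ring the op and its mbuf were simply lost. A fix on that path would
have needed something like this sketch (hypothetical, not the patched
code):

	/* hypothetical: detect the full ring, but the error would
	 * still have to be propagated back to the enqueue_burst
	 * caller, and this handler returns void */
	int ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
	if (ret == -ENOBUFS)
		qp->qp_stats.enqueue_err_count++;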

This patch moves the crypto operation to the dequeue function,
which fixes the above issue without having to check the number
of free entries in the ring.
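
With this split, the PMD enqueue just stores the raw ops in the ring
and reports how many it accepted, so the application keeps ownership
of any ops that did not fit. A hypothetical application-side sketch
(BURST_SIZE and handle_backpressure() are illustrative, not part of
DPDK; dev_id, qp_id, ops and nb_ops are assumed set up as usual):

	uint16_t nb_sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
			ops, nb_ops);
	if (nb_sent < nb_ops)
		/* ring full: ops[nb_sent..nb_ops-1] are still owned
		 * by the application; retry or free them, no leak */
		handle_backpressure(&ops[nb_sent], nb_ops - nb_sent);

	/* the actual AES-GCM processing now happens on this call */
	struct rte_crypto_op *deq_ops[BURST_SIZE];
	uint16_t nb_done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
			deq_ops, BURST_SIZE);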

Fixes: eec136f3c54f ("aesni_gcm: add driver for AES-GCM crypto operations")

Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
@@ -375,55 +375,58 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
 		rte_mempool_put(qp->sess_mp, op->sym->session);
 		op->sym->session = NULL;
 	}
-
-	rte_ring_enqueue(qp->processed_pkts, (void *)op);
 }
 
-static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct aesni_gcm_session *sess;
-	struct aesni_gcm_qp *qp = queue_pair;
-
-	int i, retval = 0;
-
-	for (i = 0; i < nb_ops; i++) {
-
-		sess = aesni_gcm_get_session(qp, ops[i]->sym);
-		if (unlikely(sess == NULL)) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.enqueue_err_count++;
-			break;
-		}
-
-		retval = process_gcm_crypto_op(ops[i]->sym, sess);
-		if (retval < 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.enqueue_err_count++;
-			break;
-		}
-
-		handle_completed_gcm_crypto_op(qp, ops[i]);
-
-		qp->qp_stats.enqueued_count++;
-	}
-	return i;
-}
-
 static uint16_t
 aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
+	struct aesni_gcm_session *sess;
 	struct aesni_gcm_qp *qp = queue_pair;
 
-	unsigned nb_dequeued;
+	int retval = 0;
+	unsigned int i, nb_dequeued;
 
 	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
 			(void **)ops, nb_ops, NULL);
-	qp->qp_stats.dequeued_count += nb_dequeued;
 
-	return nb_dequeued;
+	for (i = 0; i < nb_dequeued; i++) {
+
+		sess = aesni_gcm_get_session(qp, ops[i]->sym);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.dequeue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(ops[i]->sym, sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.dequeue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, ops[i]);
+	}
+
+	qp->qp_stats.dequeued_count += i;
+
+	return i;
 }
 
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	unsigned int nb_enqueued;
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops, NULL);
+	qp->qp_stats.enqueued_count += nb_enqueued;
+
+	return nb_enqueued;
+}
+
 static int aesni_gcm_remove(const char *name);