crypto: support scatter-gather in software drivers

This patch introduces the RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER feature flag,
which indicates that the selected crypto device natively supports segmented
mbufs, so they do not need to be coalesced before the crypto operation.
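
As an illustration of how a caller can act on the new flag (not part of the
patch), the capability can be queried through rte_cryptodev_info_get() before
segmented mbufs are handed to a device. A minimal sketch, assuming dev_id
names an already-probed cryptodev and with a hypothetical helper name:

#include <rte_cryptodev.h>

/* Sketch: report whether a crypto device accepts segmented (multi-segment)
 * mbufs directly. dev_id is assumed to be a valid cryptodev identifier. */
static int
crypto_dev_supports_sgl(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) != 0;
}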

Because using segmented buffers with PMDs that do not support them natively
may produce unpredictable results, an additional check is added to those PMDs
for debug builds.
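
For PMDs that do not advertise the flag, "coalescing" means flattening the
mbuf chain into its first segment before building the crypto op. A minimal
sketch (not part of the patch), assuming rte_pktmbuf_linearize() from
librte_mbuf is available, the first segment has enough tailroom, and with a
hypothetical helper name:

#include <rte_mbuf.h>

/* Sketch: make a packet contiguous before enqueuing it to a PMD that lacks
 * RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER. Returns 0 on success, or -1 when the
 * first segment does not have enough tailroom to hold the whole packet. */
static int
coalesce_for_crypto(struct rte_mbuf *m)
{
	if (rte_pktmbuf_is_contiguous(m))
		return 0;	/* already a single segment */
	return rte_pktmbuf_linearize(m);
}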

Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
Tomasz Kulasek 2017-01-13 16:23:15 +01:00 committed by Pablo de Lara
parent ce74457449
commit 2d03ec6abd
8 changed files with 77 additions and 6 deletions


@@ -377,6 +377,20 @@ aesni_gcm_pmd_enqueue_burst(void *queue_pair,
 			break;
 		}
 
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+					ops[i]->sym->m_dst))) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			GCM_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+#endif
+
 		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;


@@ -571,15 +571,28 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	int i, processed_jobs = 0;
 
 	for (i = 0; i < nb_ops; i++) {
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-		if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
 			MB_LOG_ERR("PMD only supports symmetric crypto "
 				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
+				"symmetric operation.", ops[i]);
 			qp->stats.enqueue_err_count++;
 			goto flush_jobs;
 		}
+
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+					ops[i]->sym->m_dst))) {
+			MB_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->stats.enqueue_err_count++;
+			goto flush_jobs;
+		}
 #endif
+
 		sess = get_session(qp, ops[i]);
 		if (unlikely(sess == NULL)) {
 			qp->stats.enqueue_err_count++;


@@ -455,6 +455,19 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	for (i = 0; i < nb_ops; i++) {
 		curr_c_op = ops[i];
 
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
+				(curr_c_op->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+					curr_c_op->sym->m_dst))) {
+			KASUMI_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", curr_c_op);
+			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+#endif
+
 		/* Set status as enqueued (not processed yet) by default. */
 		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;


@@ -216,7 +216,8 @@ cryptodev_null_create(const char *name,
 	dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
 	internals = dev->data->dev_private;


@@ -330,6 +330,21 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
 	unsigned i;
 	unsigned enqueued_ops, processed_ops;
 
+#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+	for (i = 0; i < num_ops; i++) {
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+					ops[i]->sym->m_dst))) {
+			SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			return 0;
+		}
+	}
+#endif
+
 	switch (session->op) {
 	case SNOW3G_OP_ONLY_CIPHER:
 		processed_ops = process_snow3g_cipher_op(ops,


@@ -211,6 +211,19 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 			break;
 		}
 
+#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+					ops[i]->sym->m_dst))) {
+			ZUC_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+#endif
+
 		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->cipher.data.offset >> 3);
 		dst[i] = ops[i]->sym->m_dst ?


@@ -211,13 +211,13 @@ rte_cryptodev_get_feature_name(uint64_t flag)
 		return "CPU_AESNI";
 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
 		return "HW_ACCELERATED";
+	case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
+		return "MBUF_SCATTER_GATHER";
 	default:
 		return NULL;
 	}
 }
 
 int
 rte_cryptodev_create_vdev(const char *name, const char *args)
 {


@@ -227,6 +227,8 @@ struct rte_cryptodev_capabilities {
 /**< Operations are off-loaded to an external hardware accelerator */
 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
 /**< Utilises CPU SIMD AVX512 instructions */
+#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER (1ULL << 9)
+/**< Scatter-gather mbufs are supported */
 
 /**