app/crypto-perf: use single mempool

To improve memory utilization, a single mempool is created,
whose objects contain the crypto operation and its mbufs
(one mbuf if the operation is in-place, two if out-of-place).
This way, a single object is allocated and freed
per operation, reducing the amount of memory held in cache
and improving scalability.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Author: Pablo de Lara, 2017-10-04 04:46:13 +01:00
parent c4f916e332
commit bf9d6702ec
8 changed files with 333 additions and 340 deletions
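
Before the per-file changes, here is a minimal standalone sketch of how the new single-object layout and the two buffer offsets fit together. It mirrors the formulas in cperf_alloc_common_memory() below; the concrete sizes are assumed example values, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative layout of one mempool object (sketch only):
 *   [ crypto op + sym op + IVs/AAD, cache-line padded ]
 *   [ mbuf header + segment data ] x segments_nb    <- source buffer
 *   [ destination buffer, single segment ]          <- out-of-place only
 */
int
main(void)
{
	/* Assumed example sizes */
	uint32_t crypto_op_total_size_padded = 192; /* op + sym op + IVs/AAD, padded */
	uint32_t mbuf_hdr_size = 128;               /* stand-in for sizeof(struct rte_mbuf) */
	uint32_t segment_sz = 2048;                 /* --segment-sz */
	uint32_t max_buffer_size = 2048, digest_sz = 64;
	int out_of_place = 1;

	uint32_t mbuf_size = mbuf_hdr_size + segment_sz;
	uint32_t max_size = max_buffer_size + digest_sz;
	uint32_t segments_nb = (max_size + segment_sz - 1) / segment_sz;

	uint32_t src_buf_offset = crypto_op_total_size_padded;
	uint32_t obj_size = crypto_op_total_size_padded + mbuf_size * segments_nb;
	uint32_t dst_buf_offset = 0;

	if (out_of_place) {
		dst_buf_offset = src_buf_offset + mbuf_size * segments_nb;
		obj_size += max_size; /* destination buffer is a single segment */
	}

	printf("segments_nb=%u src_buf_offset=%u dst_buf_offset=%u obj_size=%u\n",
			segments_nb, src_buf_offset, dst_buf_offset, obj_size);
	return 0;
}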

View File

@ -37,7 +37,7 @@
static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
@ -48,10 +48,18 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
src_buf_offset);
/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
if (dst_buf_offset == 0)
sym_op->m_dst = NULL;
else
sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
dst_buf_offset);
/* cipher parameters */
sym_op->cipher.data.length = options->test_buffer_size;
@ -63,7 +71,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
@ -74,10 +82,18 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
src_buf_offset);
/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
if (dst_buf_offset == 0)
sym_op->m_dst = NULL;
else
sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
dst_buf_offset);
/* auth parameters */
sym_op->auth.data.length = options->test_buffer_size;
@ -89,7 +105,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@ -100,10 +116,18 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
src_buf_offset);
/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
if (dst_buf_offset == 0)
sym_op->m_dst = NULL;
else
sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@ -132,7 +156,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@ -143,10 +167,18 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
src_buf_offset);
/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
if (dst_buf_offset == 0)
sym_op->m_dst = NULL;
else
sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
dst_buf_offset);
if (test_vector->auth_iv.length) {
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
@ -167,9 +199,9 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
buf = sym_op->m_dst;
} else {
tbuf = bufs_in[i];
tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
@ -219,7 +251,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@ -230,10 +262,18 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
src_buf_offset);
/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
if (dst_buf_offset == 0)
sym_op->m_dst = NULL;
else
sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@ -256,9 +296,9 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
buf = sym_op->m_dst;
} else {
tbuf = bufs_in[i];
tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
@ -316,7 +356,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@ -329,10 +369,18 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
src_buf_offset);
/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
if (dst_buf_offset == 0)
sym_op->m_dst = NULL;
else
sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
dst_buf_offset);
/* AEAD parameters */
sym_op->aead.data.length = options->test_buffer_size;
@ -354,9 +402,9 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
buf = sym_op->m_dst;
} else {
tbuf = bufs_in[i];
tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
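
All the cperf_set_ops_*() variants above resolve their mbufs with the same pointer arithmetic. A sketch of that pattern as a standalone helper; the name cperf_obj_mbuf() is hypothetical and does not appear in the patch.

#include <stdint.h>
#include <rte_crypto.h>
#include <rte_mbuf.h>

/*
 * The mbuf lives inside the same mempool object as the crypto operation,
 * at a fixed byte offset from the start of the operation.
 */
static inline struct rte_mbuf *
cperf_obj_mbuf(struct rte_crypto_op *op, uint32_t buf_offset)
{
	if (buf_offset == 0)
		return NULL; /* in-place: no separate destination mbuf */

	return (struct rte_mbuf *)((uint8_t *)op + buf_offset);
}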

View File

@ -47,7 +47,7 @@ typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
uint16_t iv_offset);
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,

View File

@ -34,75 +34,111 @@
#include "cperf_test_common.h"
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
uint32_t segment_sz,
uint32_t segments_nb,
const struct cperf_options *options)
struct obj_params {
uint32_t src_buf_offset;
uint32_t dst_buf_offset;
uint16_t segment_sz;
uint16_t segments_nb;
};
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
{
struct rte_mbuf *mbuf;
uint8_t *mbuf_data;
uint32_t remaining_bytes = options->max_buffer_size;
uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
mbuf = rte_pktmbuf_alloc(mempool);
if (mbuf == NULL)
goto error;
/* start of buffer is after mbuf structure and priv data */
m->priv_size = 0;
m->buf_addr = (char *)m + mbuf_hdr_size;
m->buf_physaddr = rte_mempool_virt2phy(mp, obj) +
mbuf_offset + mbuf_hdr_size;
m->buf_len = segment_sz;
m->data_len = segment_sz;
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
/* No headroom needed for the buffer */
m->data_off = 0;
if (options->max_buffer_size <= segment_sz)
remaining_bytes = 0;
/* init some constant fields */
m->pool = mp;
m->nb_segs = 1;
m->port = 0xff;
rte_mbuf_refcnt_set(m, 1);
m->next = NULL;
}
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
uint16_t segments_nb)
{
uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
uint16_t remaining_segments = segments_nb;
struct rte_mbuf *next_mbuf;
phys_addr_t next_seg_phys_addr = rte_mempool_virt2phy(mp, obj) +
mbuf_offset + mbuf_hdr_size;
do {
/* start of buffer is after mbuf structure and priv data */
m->priv_size = 0;
m->buf_addr = (char *)m + mbuf_hdr_size;
m->buf_physaddr = next_seg_phys_addr;
next_seg_phys_addr += mbuf_hdr_size + segment_sz;
m->buf_len = segment_sz;
m->data_len = segment_sz;
/* No headroom needed for the buffer */
m->data_off = 0;
/* init some constant fields */
m->pool = mp;
m->nb_segs = segments_nb;
m->port = 0xff;
rte_mbuf_refcnt_set(m, 1);
next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
mbuf_hdr_size + segment_sz);
m->next = next_mbuf;
m = next_mbuf;
remaining_segments--;
} while (remaining_segments > 0);
m->next = NULL;
}
static void
mempool_obj_init(struct rte_mempool *mp,
void *opaque_arg,
void *obj,
__attribute__((unused)) unsigned int i)
{
struct obj_params *params = opaque_arg;
struct rte_crypto_op *op = obj;
struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
params->src_buf_offset);
/* Set crypto operation */
op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
/* Set source buffer */
op->sym->m_src = m;
if (params->segments_nb == 1)
fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
params->segment_sz);
else
remaining_bytes -= segment_sz;
fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
params->segment_sz, params->segments_nb);
segments_nb--;
while (remaining_bytes) {
struct rte_mbuf *m;
m = rte_pktmbuf_alloc(mempool);
if (m == NULL)
goto error;
rte_pktmbuf_chain(mbuf, m);
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
if (remaining_bytes <= segment_sz)
remaining_bytes = 0;
else
remaining_bytes -= segment_sz;
segments_nb--;
}
/*
* If there was not enough room for the digest at the end
* of the last segment, allocate a new one
*/
if (segments_nb != 0) {
struct rte_mbuf *m;
m = rte_pktmbuf_alloc(mempool);
if (m == NULL)
goto error;
rte_pktmbuf_chain(mbuf, m);
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
}
return mbuf;
error:
if (mbuf != NULL)
rte_pktmbuf_free(mbuf);
return NULL;
/* Set destination buffer */
if (params->dst_buf_offset) {
m = (struct rte_mbuf *) ((uint8_t *) obj +
params->dst_buf_offset);
fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
params->segment_sz);
op->sym->m_dst = m;
} else
op->sym->m_dst = NULL;
}
int
@ -110,120 +146,81 @@ cperf_alloc_common_memory(const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
uint8_t dev_id, uint16_t qp_id,
size_t extra_op_priv_size,
struct rte_mempool **pkt_mbuf_pool_in,
struct rte_mempool **pkt_mbuf_pool_out,
struct rte_mbuf ***mbufs_in,
struct rte_mbuf ***mbufs_out,
struct rte_mempool **crypto_op_pool)
uint32_t *src_buf_offset,
uint32_t *dst_buf_offset,
struct rte_mempool **pool)
{
unsigned int mbuf_idx = 0;
char pool_name[32] = "";
int ret;
snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%u_qp_%u",
dev_id, qp_id);
/* Calculate the object size */
uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
uint16_t crypto_op_private_size = extra_op_priv_size +
test_vector->cipher_iv.length +
test_vector->auth_iv.length +
test_vector->aead_iv.length +
options->aead_aad_sz;
uint16_t crypto_op_total_size = crypto_op_size +
crypto_op_private_size;
uint16_t crypto_op_total_size_padded =
RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
uint32_t max_size = options->max_buffer_size + options->digest_sz;
uint16_t segments_nb = (max_size % options->segment_sz) ?
(max_size / options->segment_sz) + 1 :
max_size / options->segment_sz;
uint32_t obj_size = crypto_op_total_size_padded +
(mbuf_size * segments_nb);
*pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
options->pool_sz * segments_nb, 0, 0,
RTE_PKTMBUF_HEADROOM + options->segment_sz,
rte_socket_id());
if (*pkt_mbuf_pool_in == NULL)
return -1;
/* Generate mbufs_in with plaintext populated for test */
*mbufs_in = (struct rte_mbuf **)rte_malloc(NULL,
(sizeof(struct rte_mbuf *) * options->pool_sz), 0);
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
(*mbufs_in)[mbuf_idx] = cperf_mbuf_create(
*pkt_mbuf_pool_in,
options->segment_sz,
segments_nb,
options);
if ((*mbufs_in)[mbuf_idx] == NULL)
return -1;
}
*mbufs_out = (struct rte_mbuf **)rte_zmalloc(NULL,
(sizeof(struct rte_mbuf *) *
options->pool_sz), 0);
if (options->out_of_place == 1) {
snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%u_qp_%u",
dev_id, qp_id);
*pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM + max_size,
rte_socket_id());
if (*pkt_mbuf_pool_out == NULL)
return -1;
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
(*mbufs_out)[mbuf_idx] = cperf_mbuf_create(
*pkt_mbuf_pool_out, max_size,
1, options);
if ((*mbufs_out)[mbuf_idx] == NULL)
return -1;
}
}
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%u_qp_%u",
snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
dev_id, qp_id);
uint16_t priv_size = RTE_ALIGN_CEIL(test_vector->cipher_iv.length +
test_vector->auth_iv.length + test_vector->aead_iv.length +
extra_op_priv_size, 16) +
RTE_ALIGN_CEIL(options->aead_aad_sz, 16);
*src_buf_offset = crypto_op_total_size_padded;
*crypto_op_pool = rte_crypto_op_pool_create(pool_name,
RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
512, priv_size, rte_socket_id());
if (*crypto_op_pool == NULL)
struct obj_params params = {
.segment_sz = options->segment_sz,
.segments_nb = segments_nb,
.src_buf_offset = crypto_op_total_size_padded,
.dst_buf_offset = 0
};
if (options->out_of_place) {
*dst_buf_offset = *src_buf_offset +
(mbuf_size * segments_nb);
params.dst_buf_offset = *dst_buf_offset;
/* Destination buffer will be one segment only */
obj_size += max_size;
}
*pool = rte_mempool_create_empty(pool_name,
options->pool_sz, obj_size, 512, 0,
rte_socket_id(), 0);
if (*pool == NULL) {
RTE_LOG(ERR, USER1,
"Cannot allocate mempool for device %u\n",
dev_id);
return -1;
}
ret = rte_mempool_set_ops_byname(*pool,
RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
if (ret != 0) {
RTE_LOG(ERR, USER1,
"Error setting mempool handler for device %u\n",
dev_id);
return -1;
}
ret = rte_mempool_populate_default(*pool);
if (ret < 0) {
RTE_LOG(ERR, USER1,
"Error populating mempool for device %u\n",
dev_id);
return -1;
}
rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);
return 0;
}
void
cperf_free_common_memory(const struct cperf_options *options,
struct rte_mempool *pkt_mbuf_pool_in,
struct rte_mempool *pkt_mbuf_pool_out,
struct rte_mbuf **mbufs_in,
struct rte_mbuf **mbufs_out,
struct rte_mempool *crypto_op_pool)
{
uint32_t i = 0;
if (mbufs_in) {
while (mbufs_in[i] != NULL &&
i < options->pool_sz)
rte_pktmbuf_free(mbufs_in[i++]);
rte_free(mbufs_in);
}
if (mbufs_out) {
i = 0;
while (mbufs_out[i] != NULL
&& i < options->pool_sz)
rte_pktmbuf_free(mbufs_out[i++]);
rte_free(mbufs_out);
}
if (pkt_mbuf_pool_in)
rte_mempool_free(pkt_mbuf_pool_in);
if (pkt_mbuf_pool_out)
rte_mempool_free(pkt_mbuf_pool_out);
if (crypto_op_pool)
rte_mempool_free(crypto_op_pool);
}
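
The allocation path above boils down to creating an empty mempool of raw objects and initializing each object exactly once. A condensed sketch of that flow, assuming the mempool_obj_init() callback and obj_params structure from the diff; create_op_pool_sketch() is a hypothetical wrapper, not code from the patch.

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_lcore.h>

static struct rte_mempool *
create_op_pool_sketch(const char *name, unsigned int pool_sz,
		uint32_t obj_size, rte_mempool_obj_cb_t *obj_init,
		void *params)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty(name, pool_sz, obj_size,
			512 /* per-lcore cache size */, 0,
			rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	if (rte_mempool_set_ops_byname(mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
				NULL) != 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	/* Runs once per object: builds the crypto op and its mbuf(s) in place */
	rte_mempool_obj_iter(mp, obj_init, params);

	return mp;
}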

View File

@ -45,18 +45,8 @@ cperf_alloc_common_memory(const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
uint8_t dev_id, uint16_t qp_id,
size_t extra_op_priv_size,
struct rte_mempool **pkt_mbuf_pool_in,
struct rte_mempool **pkt_mbuf_pool_out,
struct rte_mbuf ***mbufs_in,
struct rte_mbuf ***mbufs_out,
struct rte_mempool **crypto_op_pool);
void
cperf_free_common_memory(const struct cperf_options *options,
struct rte_mempool *pkt_mbuf_pool_in,
struct rte_mempool *pkt_mbuf_pool_out,
struct rte_mbuf **mbufs_in,
struct rte_mbuf **mbufs_out,
struct rte_mempool *crypto_op_pool);
uint32_t *src_buf_offset,
uint32_t *dst_buf_offset,
struct rte_mempool **pool);
#endif /* _CPERF_TEST_COMMON_H_ */

View File

@ -50,17 +50,15 @@ struct cperf_latency_ctx {
uint16_t qp_id;
uint8_t lcore_id;
struct rte_mempool *pkt_mbuf_pool_in;
struct rte_mempool *pkt_mbuf_pool_out;
struct rte_mbuf **mbufs_in;
struct rte_mbuf **mbufs_out;
struct rte_mempool *crypto_op_pool;
struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
uint32_t src_buf_offset;
uint32_t dst_buf_offset;
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
struct cperf_op_result *res;
@ -82,11 +80,8 @@ cperf_latency_test_free(struct cperf_latency_ctx *ctx)
rte_cryptodev_sym_session_free(ctx->sess);
}
cperf_free_common_memory(ctx->options,
ctx->pkt_mbuf_pool_in,
ctx->pkt_mbuf_pool_out,
ctx->mbufs_in, ctx->mbufs_out,
ctx->crypto_op_pool);
if (ctx->pool)
rte_mempool_free(ctx->pool);
rte_free(ctx->res);
rte_free(ctx);
@ -126,9 +121,8 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id,
extra_op_priv_size,
&ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
&ctx->mbufs_in, &ctx->mbufs_out,
&ctx->crypto_op_pool) < 0)
&ctx->src_buf_offset, &ctx->dst_buf_offset,
&ctx->pool) < 0)
goto err;
ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
@ -204,7 +198,7 @@ cperf_latency_test_runner(void *arg)
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
uint64_t m_idx = 0, b_idx = 0;
uint64_t b_idx = 0;
uint64_t tsc_val, tsc_end, tsc_start;
uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
@ -219,11 +213,9 @@ cperf_latency_test_runner(void *arg)
ctx->options->total_ops -
enqd_tot;
/* Allocate crypto ops from pool */
if (burst_size != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops, burst_size)) {
/* Allocate objects containing crypto operations and mbufs */
if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
burst_size) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the crypto operation pool.\n"
@ -233,8 +225,8 @@ cperf_latency_test_runner(void *arg)
}
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
(ctx->populate_ops)(ops, ctx->src_buf_offset,
ctx->dst_buf_offset,
burst_size, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
@ -263,7 +255,7 @@ cperf_latency_test_runner(void *arg)
/* Free memory for not enqueued operations */
if (ops_enqd != burst_size)
rte_mempool_put_bulk(ctx->crypto_op_pool,
rte_mempool_put_bulk(ctx->pool,
(void **)&ops[ops_enqd],
burst_size - ops_enqd);
@ -279,16 +271,11 @@ cperf_latency_test_runner(void *arg)
}
if (likely(ops_deqd)) {
/*
* free crypto ops so they can be reused. We don't free
* the mbufs here as we don't want to reuse them as
* the crypto operation will change the data and cause
* failures.
*/
/* Free crypto ops so they can be reused. */
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
rte_mempool_put_bulk(ctx->crypto_op_pool,
rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
@ -300,9 +287,6 @@ cperf_latency_test_runner(void *arg)
enqd_max = max(ops_enqd, enqd_max);
enqd_min = min(ops_enqd, enqd_min);
m_idx += ops_enqd;
m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
0 : m_idx;
b_idx++;
}
@ -321,7 +305,7 @@ cperf_latency_test_runner(void *arg)
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
rte_mempool_put_bulk(ctx->crypto_op_pool,
rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
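
On the runner side (latency, throughput, pmd-cyclecount and verify all follow the same shape), one burst now moves whole objects between the pool and the device. A condensed sketch of that cycle, assuming the cperf_latency_ctx fields shown in the diff above; this is not code from the patch and omits stats and retry handling.

#include <rte_mempool.h>
#include <rte_cryptodev.h>

static int
run_one_burst_sketch(struct cperf_latency_ctx *ctx, uint16_t burst_size,
		uint16_t iv_offset)
{
	struct rte_crypto_op *ops[burst_size];
	struct rte_crypto_op *ops_done[burst_size];
	uint16_t enqd, deqd;

	/* One object per op: the crypto op and its mbuf(s) come together */
	if (rte_mempool_get_bulk(ctx->pool, (void **)ops, burst_size) != 0)
		return -1;

	(ctx->populate_ops)(ops, ctx->src_buf_offset, ctx->dst_buf_offset,
			burst_size, ctx->sess, ctx->options,
			ctx->test_vector, iv_offset);

	enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
			ops, burst_size);
	if (enqd < burst_size) /* return what the device did not accept */
		rte_mempool_put_bulk(ctx->pool, (void **)&ops[enqd],
				burst_size - enqd);

	deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
			ops_done, burst_size);
	if (deqd > 0)
		/* Whole objects go back to the pool; no separate mbuf free */
		rte_mempool_put_bulk(ctx->pool, (void **)ops_done, deqd);

	return 0;
}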

View File

@ -51,12 +51,7 @@ struct cperf_pmd_cyclecount_ctx {
uint16_t qp_id;
uint8_t lcore_id;
struct rte_mempool *pkt_mbuf_pool_in;
struct rte_mempool *pkt_mbuf_pool_out;
struct rte_mbuf **mbufs_in;
struct rte_mbuf **mbufs_out;
struct rte_mempool *crypto_op_pool;
struct rte_mempool *pool;
struct rte_crypto_op **ops;
struct rte_crypto_op **ops_processed;
@ -64,6 +59,9 @@ struct cperf_pmd_cyclecount_ctx {
cperf_populate_ops_t populate_ops;
uint32_t src_buf_offset;
uint32_t dst_buf_offset;
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
@ -95,11 +93,9 @@ cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
rte_cryptodev_sym_session_free(ctx->sess);
}
cperf_free_common_memory(ctx->options,
ctx->pkt_mbuf_pool_in,
ctx->pkt_mbuf_pool_out,
ctx->mbufs_in, ctx->mbufs_out,
ctx->crypto_op_pool);
if (ctx->pool)
rte_mempool_free(ctx->pool);
if (ctx->ops)
rte_free(ctx->ops);
@ -144,9 +140,8 @@ cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
goto err;
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
&ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
&ctx->mbufs_in, &ctx->mbufs_out,
&ctx->crypto_op_pool) < 0)
&ctx->src_buf_offset, &ctx->dst_buf_offset,
&ctx->pool) < 0)
goto err;
ctx->ops = rte_malloc("ops", alloc_sz, 0);
@ -181,16 +176,22 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
test_burst_size);
struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
if (burst_size != rte_crypto_op_bulk_alloc(
state->ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops, burst_size))
return -1;
/* Allocate objects containing crypto operations and mbufs */
if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
burst_size) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the crypto operation pool.\n"
"Consider increasing the pool size "
"with --pool-sz\n");
return -1;
}
/* Setup crypto op, attach mbuf etc */
(state->ctx->populate_ops)(ops,
&state->ctx->mbufs_in[cur_iter_op],
&state->ctx->mbufs_out[cur_iter_op], burst_size,
state->ctx->src_buf_offset,
state->ctx->dst_buf_offset,
burst_size,
state->ctx->sess, state->opts,
state->ctx->test_vector, iv_offset);
@ -204,7 +205,7 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
}
}
#endif /* CPERF_LINEARIZATION_ENABLE */
rte_mempool_put_bulk(state->ctx->crypto_op_pool, (void **)ops,
rte_mempool_put_bulk(state->ctx->pool, (void **)ops,
burst_size);
}
@ -224,16 +225,22 @@ pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
iter_ops_needed - cur_iter_op, test_burst_size);
struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
if (burst_size != rte_crypto_op_bulk_alloc(
state->ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops, burst_size))
return -1;
/* Allocate objects containing crypto operations and mbufs */
if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
burst_size) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the crypto operation pool.\n"
"Consider increasing the pool size "
"with --pool-sz\n");
return -1;
}
/* Setup crypto op, attach mbuf etc */
(state->ctx->populate_ops)(ops,
&state->ctx->mbufs_in[cur_iter_op],
&state->ctx->mbufs_out[cur_iter_op], burst_size,
state->ctx->src_buf_offset,
state->ctx->dst_buf_offset,
burst_size,
state->ctx->sess, state->opts,
state->ctx->test_vector, iv_offset);
}
@ -382,7 +389,7 @@ pmd_cyclecount_bench_burst_sz(
* we may not have processed all ops that we allocated, so
* free everything we've allocated.
*/
rte_mempool_put_bulk(state->ctx->crypto_op_pool,
rte_mempool_put_bulk(state->ctx->pool,
(void **)state->ctx->ops, iter_ops_allocd);
}

View File

@ -44,17 +44,15 @@ struct cperf_throughput_ctx {
uint16_t qp_id;
uint8_t lcore_id;
struct rte_mempool *pkt_mbuf_pool_in;
struct rte_mempool *pkt_mbuf_pool_out;
struct rte_mbuf **mbufs_in;
struct rte_mbuf **mbufs_out;
struct rte_mempool *crypto_op_pool;
struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
uint32_t src_buf_offset;
uint32_t dst_buf_offset;
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
@ -68,11 +66,8 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx)
rte_cryptodev_sym_session_free(ctx->sess);
}
cperf_free_common_memory(ctx->options,
ctx->pkt_mbuf_pool_in,
ctx->pkt_mbuf_pool_out,
ctx->mbufs_in, ctx->mbufs_out,
ctx->crypto_op_pool);
if (ctx->pool)
rte_mempool_free(ctx->pool);
rte_free(ctx);
}
@ -108,9 +103,8 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
goto err;
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
&ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
&ctx->mbufs_in, &ctx->mbufs_out,
&ctx->crypto_op_pool) < 0)
&ctx->src_buf_offset, &ctx->dst_buf_offset,
&ctx->pool) < 0)
goto err;
return ctx;
@ -167,7 +161,7 @@ cperf_throughput_test_runner(void *test_ctx)
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
uint64_t tsc_start, tsc_end, tsc_duration;
uint16_t ops_unused = 0;
@ -183,11 +177,9 @@ cperf_throughput_test_runner(void *test_ctx)
uint16_t ops_needed = burst_size - ops_unused;
/* Allocate crypto ops from pool */
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops, ops_needed)) {
/* Allocate objects containing crypto operations and mbufs */
if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the crypto operation pool.\n"
@ -197,10 +189,11 @@ cperf_throughput_test_runner(void *test_ctx)
}
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
(ctx->populate_ops)(ops, ctx->src_buf_offset,
ctx->dst_buf_offset,
ops_needed, ctx->sess,
ctx->options, ctx->test_vector,
iv_offset);
/**
* When ops_needed is smaller than ops_enqd, the
@ -245,12 +238,8 @@ cperf_throughput_test_runner(void *test_ctx)
ops_processed, test_burst_size);
if (likely(ops_deqd)) {
/* free crypto ops so they can be reused. We don't free
* the mbufs here as we don't want to reuse them as
* the crypto operation will change the data and cause
* failures.
*/
rte_mempool_put_bulk(ctx->crypto_op_pool,
/* Free crypto ops so they can be reused. */
rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
@ -263,9 +252,6 @@ cperf_throughput_test_runner(void *test_ctx)
ops_deqd_failed++;
}
m_idx += ops_needed;
m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
0 : m_idx;
}
/* Dequeue any operations still in the crypto device */
@ -280,9 +266,8 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_deqd == 0)
ops_deqd_failed++;
else {
rte_mempool_put_bulk(ctx->crypto_op_pool,
rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
}

View File

@ -44,17 +44,15 @@ struct cperf_verify_ctx {
uint16_t qp_id;
uint8_t lcore_id;
struct rte_mempool *pkt_mbuf_pool_in;
struct rte_mempool *pkt_mbuf_pool_out;
struct rte_mbuf **mbufs_in;
struct rte_mbuf **mbufs_out;
struct rte_mempool *crypto_op_pool;
struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
uint32_t src_buf_offset;
uint32_t dst_buf_offset;
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
@ -72,11 +70,8 @@ cperf_verify_test_free(struct cperf_verify_ctx *ctx)
rte_cryptodev_sym_session_free(ctx->sess);
}
cperf_free_common_memory(ctx->options,
ctx->pkt_mbuf_pool_in,
ctx->pkt_mbuf_pool_out,
ctx->mbufs_in, ctx->mbufs_out,
ctx->crypto_op_pool);
if (ctx->pool)
rte_mempool_free(ctx->pool);
rte_free(ctx);
}
@ -102,7 +97,7 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
/* IV goes at the end of the cryptop operation */
/* IV goes at the end of the crypto operation */
uint16_t iv_offset = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
@ -112,9 +107,8 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
goto err;
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
&ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
&ctx->mbufs_in, &ctx->mbufs_out,
&ctx->crypto_op_pool) < 0)
&ctx->src_buf_offset, &ctx->dst_buf_offset,
&ctx->pool) < 0)
goto err;
return ctx;
@ -268,7 +262,7 @@ cperf_verify_test_runner(void *test_ctx)
static int only_once;
uint64_t i, m_idx = 0;
uint64_t i;
uint16_t ops_unused = 0;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
@ -308,11 +302,9 @@ cperf_verify_test_runner(void *test_ctx)
uint16_t ops_needed = burst_size - ops_unused;
/* Allocate crypto ops from pool */
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops, ops_needed)) {
/* Allocate objects containing crypto operations and mbufs */
if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the crypto operation pool.\n"
@ -322,8 +314,8 @@ cperf_verify_test_runner(void *test_ctx)
}
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
(ctx->populate_ops)(ops, ctx->src_buf_offset,
ctx->dst_buf_offset,
ops_needed, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
@ -363,10 +355,6 @@ cperf_verify_test_runner(void *test_ctx)
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
ops_processed, ctx->options->max_burst_size);
m_idx += ops_needed;
if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
m_idx = 0;
if (ops_deqd == 0) {
/**
* Count dequeue polls which didn't return any
@ -381,13 +369,10 @@ cperf_verify_test_runner(void *test_ctx)
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
/* free crypto ops so they can be reused. We don't free
* the mbufs here as we don't want to reuse them as
* the crypto operation will change the data and cause
* failures.
*/
rte_crypto_op_free(ops_processed[i]);
}
/* Free crypto ops so they can be reused. */
rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@ -409,13 +394,10 @@ cperf_verify_test_runner(void *test_ctx)
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
/* free crypto ops so they can be reused. We don't free
* the mbufs here as we don't want to reuse them as
* the crypto operation will change the data and cause
* failures.
*/
rte_crypto_op_free(ops_processed[i]);
}
/* Free crypto ops so they can be reused. */
rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}