f8be1786b1
This patchset introduces a new application that measures performance parameters of the PMDs available in the crypto tree. The goal of this application is to replace the existing performance tests in app/test. The available test types are throughput (--ptest throughput) and latency (--ptest latency). Tests can be run on multiple cores, but only one type of crypto PMD can be measured during a single application run. Cipher parameters, device type, operation type and chain mode have to be specified on the command line as application parameters, and they are validated against the device capabilities structure. A couple of new library functions in librte_cryptodev are introduced for the application's use. To build the application, the CONFIG_RTE_APP_CRYPTO_PERF flag has to be set (it is set by default).

Example of usage:

-c 0xc0 --vdev crypto_aesni_mb_pmd -w 0000:00:00.0 -- --ptest throughput --devtype crypto_aesni_mb --optype cipher-then-auth --cipher-algo aes-cbc --cipher-op encrypt --cipher-key-sz 16 --auth-algo sha1-hmac --auth-op generate --auth-key-sz 64 --auth-digest-sz 12 --total-ops 10000000 --burst-sz 32 --buffer-sz 64

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Slawomir Mrozowicz <slawomirx.mrozowicz@intel.com>
Signed-off-by: Piotr Azarewicz <piotrx.t.azarewicz@intel.com>
Signed-off-by: Marcin Kerlin <marcinx.kerlin@intel.com>
Signed-off-by: Michal Kobylinski <michalx.kobylinski@intel.com>
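For orientation, the sketch below (not part of the patch) shows roughly how the callbacks defined in this file are expected to be driven by a test runner. Only cperf_get_op_functions(), the sess_create/populate_ops members of struct cperf_op_fns, and the standard rte_cryptodev/rte_crypto calls come from the sources; the op pool, mbuf arrays, queue pair id and the run_burst() wrapper are hypothetical placeholders.

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mempool.h>

#include "cperf_ops.h"	/* assumed to declare struct cperf_op_fns and the option/vector types */

/* Hypothetical driver loop; names such as op_pool, bufs_in, bufs_out are placeholders. */
static int
run_burst(uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		struct rte_mempool *op_pool,
		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
		uint16_t burst_size)
{
	struct cperf_op_fns op_fns;
	struct rte_crypto_op *ops[burst_size];
	struct rte_cryptodev_sym_session *sess;
	uint16_t nb_enq, nb_deq;

	/* pick the populate/session callbacks matching --optype and the algorithms */
	if (cperf_get_op_functions(options, &op_fns) < 0)
		return -1;

	sess = op_fns.sess_create(dev_id, options, test_vector);
	if (sess == NULL)
		return -1;

	/* allocate symmetric crypto ops and fill them via the selected callback */
	if (rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			ops, burst_size) != burst_size)
		return -1;

	op_fns.populate_ops(ops, bufs_in, bufs_out, burst_size, sess,
			options, test_vector);

	/* hand the burst to the PMD and collect completions; a real test
	 * keeps polling until every enqueued op has been dequeued */
	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, burst_size);
	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_enq);

	rte_mempool_put_bulk(op_pool, (void **)ops, nb_deq);

	return 0;
}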
476 lines
14 KiB
C
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>

#include "cperf_ops.h"
#include "cperf_test_vectors.h"

static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = bufs_in[i];
		sym_op->m_dst = bufs_out[i];

		/* cipher parameters */
		sym_op->cipher.data.length = options->buffer_sz;
		sym_op->cipher.data.offset = 0;
	}

	return 0;
}

static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = bufs_in[i];
		sym_op->m_dst = bufs_out[i];

		/* auth parameters */
		sym_op->auth.data.length = options->buffer_sz;
		sym_op->auth.data.offset = 0;
	}

	return 0;
}

static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = bufs_in[i];
		sym_op->m_dst = bufs_out[i];

		/* cipher parameters */
		sym_op->cipher.iv.data = test_vector->iv.data;
		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
		sym_op->cipher.iv.length = test_vector->iv.length;

		sym_op->cipher.data.length = options->buffer_sz;
		sym_op->cipher.data.offset = 0;
	}

	return 0;
}

static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = bufs_in[i];
		sym_op->m_dst = bufs_out[i];

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
			sym_op->auth.digest.length = options->auth_digest_sz;
		} else {

			uint32_t offset = options->buffer_sz;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = bufs_out[i];
			} else {
				buf = bufs_in[i];

				tbuf = buf;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_mtophys_offset(buf, offset);
			sym_op->auth.digest.length = options->auth_digest_sz;
			sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
			sym_op->auth.aad.data = test_vector->aad.data;
			sym_op->auth.aad.length = options->auth_aad_sz;

		}

		sym_op->auth.data.length = options->buffer_sz;
		sym_op->auth.data.offset = 0;
	}

	return 0;
}

static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = bufs_in[i];
		sym_op->m_dst = bufs_out[i];

		/* cipher parameters */
		sym_op->cipher.iv.data = test_vector->iv.data;
		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
		sym_op->cipher.iv.length = test_vector->iv.length;

		sym_op->cipher.data.length = options->buffer_sz;
		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
			sym_op->auth.digest.length = options->auth_digest_sz;
		} else {

			uint32_t offset = options->buffer_sz;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = bufs_out[i];
			} else {
				buf = bufs_in[i];

				tbuf = buf;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_mtophys_offset(buf, offset);
			sym_op->auth.digest.length = options->auth_digest_sz;
			sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
			sym_op->auth.aad.data = test_vector->aad.data;
			sym_op->auth.aad.length = options->auth_aad_sz;
		}

		sym_op->auth.data.length = options->buffer_sz;
		sym_op->auth.data.offset = 0;
	}

	return 0;
}

static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = bufs_in[i];
		sym_op->m_dst = bufs_out[i];

		/* cipher parameters */
		sym_op->cipher.iv.data = test_vector->iv.data;
		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
		sym_op->cipher.iv.length = test_vector->iv.length;

		sym_op->cipher.data.length = options->buffer_sz;
		sym_op->cipher.data.offset =
				RTE_ALIGN_CEIL(options->auth_aad_sz, 16);

		sym_op->auth.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
		sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
		sym_op->auth.aad.length = options->auth_aad_sz;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
			sym_op->auth.digest.length = options->auth_digest_sz;
		} else {

			uint32_t offset = sym_op->cipher.data.length +
					sym_op->cipher.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = bufs_out[i];
			} else {
				buf = bufs_in[i];

				tbuf = buf;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_mtophys_offset(buf, offset);

			sym_op->auth.digest.length = options->auth_digest_sz;
		}

		sym_op->auth.data.length = options->buffer_sz;
		sym_op->auth.data.offset = options->auth_aad_sz;
	}

	return 0;
}

static struct rte_cryptodev_sym_session *
cperf_create_session(uint8_t dev_id,
	const struct cperf_options *options,
	const struct cperf_test_vector *test_vector)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_cryptodev_sym_session *sess = NULL;

	/*
	 * cipher only
	 */
	if (options->op_type == CPERF_CIPHER_ONLY) {
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
		}
		/* create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
	/*
	 * auth only
	 */
	} else if (options->op_type == CPERF_AUTH_ONLY) {
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length =
					options->auth_digest_sz;
			auth_xform.auth.add_auth_data_length =
					options->auth_aad_sz;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
		}
		/* create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform);
	/*
	 * cipher and auth
	 */
	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
			|| options->op_type == CPERF_AUTH_THEN_CIPHER
			|| options->op_type == CPERF_AEAD) {

		/*
		 * cipher
		 */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
		}

		/*
		 * auth
		 */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->auth_digest_sz;
			auth_xform.auth.add_auth_data_length =
					options->auth_aad_sz;
			/* auth options for aes gcm */
			if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
					options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
				auth_xform.auth.key.length = 0;
				auth_xform.auth.key.data = NULL;
			} else { /* auth options for others */
				auth_xform.auth.key.length =
						test_vector->auth_key.length;
				auth_xform.auth.key.data =
						test_vector->auth_key.data;
			}
		}

		/* create crypto session for aes gcm */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
			if (options->cipher_op ==
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
				cipher_xform.next = &auth_xform;
				/* create crypto session */
				sess = rte_cryptodev_sym_session_create(dev_id,
						&cipher_xform);
			} else { /* decrypt */
				auth_xform.next = &cipher_xform;
				/* create crypto session */
				sess = rte_cryptodev_sym_session_create(dev_id,
						&auth_xform);
			}
		} else { /* create crypto session for other */
			/* cipher then auth */
			if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
				cipher_xform.next = &auth_xform;
				/* create crypto session */
				sess = rte_cryptodev_sym_session_create(dev_id,
						&cipher_xform);
			} else { /* auth then cipher */
				auth_xform.next = &cipher_xform;
				/* create crypto session */
				sess = rte_cryptodev_sym_session_create(dev_id,
						&auth_xform);
			}
		}
	}
	return sess;
}

int
cperf_get_op_functions(const struct cperf_options *options,
		struct cperf_op_fns *op_fns)
{
	memset(op_fns, 0, sizeof(struct cperf_op_fns));

	op_fns->sess_create = cperf_create_session;

	if (options->op_type == CPERF_AEAD
			|| options->op_type == CPERF_AUTH_THEN_CIPHER
			|| options->op_type == CPERF_CIPHER_THEN_AUTH) {
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
				options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
			op_fns->populate_ops = cperf_set_ops_aead;
		else
			op_fns->populate_ops = cperf_set_ops_cipher_auth;
		return 0;
	}
	if (options->op_type == CPERF_AUTH_ONLY) {
		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
			op_fns->populate_ops = cperf_set_ops_null_auth;
		else
			op_fns->populate_ops = cperf_set_ops_auth;
		return 0;
	}
	if (options->op_type == CPERF_CIPHER_ONLY) {
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
			op_fns->populate_ops = cperf_set_ops_null_cipher;
		else
			op_fns->populate_ops = cperf_set_ops_cipher;
		return 0;
	}

	return -1;
}