modules/crypto/compress: changes to support DPDK mbuf changes

We need to change how we store the IO context in the mbuf, following the
DPDK updates made in 19.11: the context now lives in a registered mbuf
dynamic field rather than in the mbuf's userdata pointer.

Fixes issue #1671

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: If1183808cd30987b6c999912f563949b7ade7fcb
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5799
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Community-CI: Mellanox Build Bot
Author: paul luse, 2021-01-06 21:49:42 +00:00; committed by Tomasz Zawadzki
parent c3d5069947
commit ec2e6e2b91
4 changed files with 81 additions and 28 deletions
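
As background for the diffs below: DPDK 19.11 introduced mbuf dynamic fields
(rte_mbuf_dyn.h), and this commit moves the per-IO context these modules keep
in each mbuf from the mbuf's userdata pointer into such a registered field.
The pattern is the same in both modules: register a named field once at module
init, write the context into every mbuf before enqueueing it to the device,
and read it back when the completed operation is dequeued in the poller. Here
is a minimal sketch of that pattern outside of SPDK; the names demo_dynfield,
g_ctx_offset, demo_set_ctx and demo_get_ctx are illustrative and not part of
this commit.

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Describe the field we want DPDK to reserve in every mbuf. */
static const struct rte_mbuf_dynfield demo_dynfield = {
        .name = "demo_io_context",
        .size = sizeof(uint64_t),
        .align = __alignof__(uint64_t),
        .flags = 0,
};
static int g_ctx_offset;

/* Call once after rte_eal_init(); DPDK returns the byte offset of the field
 * inside every mbuf, or a negative value on failure. */
static int
demo_register_ctx_field(void)
{
        g_ctx_offset = rte_mbuf_dynfield_register(&demo_dynfield);
        return (g_ctx_offset < 0) ? -1 : 0;
}

/* Stash the caller's context in an mbuf before enqueueing it to the device. */
static void
demo_set_ctx(struct rte_mbuf *m, void *ctx)
{
        *RTE_MBUF_DYNFIELD(m, g_ctx_offset, uint64_t *) = (uint64_t)(uintptr_t)ctx;
}

/* Recover the context when the completed operation is dequeued in the poller. */
static void *
demo_get_ctx(struct rte_mbuf *m)
{
        return (void *)(uintptr_t)*RTE_MBUF_DYNFIELD(m, g_ctx_offset, uint64_t *);
}

Because the registered field is a plain uint64_t slot rather than a pointer,
the crypto poller below clears it by writing 0 where the old code assigned
NULL to userdata.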

View File

@@ -49,6 +49,16 @@
#include <rte_bus_vdev.h>
#include <rte_compressdev.h>
#include <rte_comp.h>
#include <rte_mbuf_dyn.h>
/* Used to store IO context in mbuf */
static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
.name = "context_reduce",
.size = sizeof(uint64_t),
.align = __alignof__(uint64_t),
.flags = 0,
};
static int g_mbuf_offset;
#define NUM_MAX_XFORMS 2
#define NUM_MAX_INFLIGHT_OPS 128
@@ -357,6 +367,12 @@ vbdev_init_compress_drivers(void)
return -EINVAL;
}
g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
if (g_mbuf_offset < 0) {
SPDK_ERRLOG("error registering dynamic field with DPDK\n");
return -EINVAL;
}
g_mbuf_mp = rte_pktmbuf_pool_create("comp_mbuf_mp", NUM_MBUFS, POOL_CACHE_SIZE,
sizeof(struct rte_mbuf), 0, rte_socket_id());
if (g_mbuf_mp == NULL) {
@@ -457,7 +473,7 @@ _setup_compress_mbuf(struct rte_mbuf **mbufs, int *mbuf_total, uint64_t *total_l
*total_length += iovs[iov_index].iov_len;
}
assert(mbufs[mbuf_index] != NULL);
mbufs[mbuf_index]->userdata = reduce_cb_arg;
*RTE_MBUF_DYNFIELD(mbufs[mbuf_index], g_mbuf_offset, uint64_t *) = (uint64_t)reduce_cb_arg;
updated_length = iovs[iov_index].iov_len;
phys_addr = spdk_vtophys((void *)current_base, &updated_length);
@@ -484,7 +500,7 @@ _setup_compress_mbuf(struct rte_mbuf **mbufs, int *mbuf_total, uint64_t *total_l
}
(*mbuf_total)++;
mbuf_index++;
mbufs[mbuf_index]->userdata = reduce_cb_arg;
*RTE_MBUF_DYNFIELD(mbufs[mbuf_index], g_mbuf_offset, uint64_t *) = (uint64_t)reduce_cb_arg;
current_base += updated_length;
phys_addr = spdk_vtophys((void *)current_base, &remainder);
/* assert we don't cross another */
@@ -656,8 +672,8 @@ comp_dev_poller(void *args)
num_deq = rte_compressdev_dequeue_burst(cdev_id, comp_bdev->device_qp->qp, deq_ops,
NUM_MAX_INFLIGHT_OPS);
for (i = 0; i < num_deq; i++) {
reduce_args = (struct spdk_reduce_vol_cb_args *)deq_ops[i]->m_src->userdata;
reduce_args = (struct spdk_reduce_vol_cb_args *)*RTE_MBUF_DYNFIELD(deq_ops[i]->m_src, g_mbuf_offset,
uint64_t *);
if (deq_ops[i]->status == RTE_COMP_OP_STATUS_SUCCESS) {
/* tell reduce this is done and what the bytecount was */

View File

@@ -40,11 +40,20 @@
#include "spdk/log.h"
#include <rte_config.h>
#include <rte_version.h>
#include <rte_bus_vdev.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_mbuf_dyn.h>
/* Used to store IO context in mbuf */
static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
.name = "context_bdev_io",
.size = sizeof(uint64_t),
.align = __alignof__(uint64_t),
.flags = 0,
};
static int g_mbuf_offset;
/* To add support for new device types, follow the examples of the following...
* Note that the string names are defined by the DPDK PMD in question so be
@@ -387,6 +396,12 @@ vbdev_crypto_init_crypto_drivers(void)
return 0;
}
g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
if (g_mbuf_offset < 0) {
SPDK_ERRLOG("error registering dynamic field with DPDK\n");
return -EINVAL;
}
/*
* Create global mempools, shared by all devices regardless of type.
*/
@@ -572,7 +587,8 @@ crypto_dev_poller(void *args)
* particular bdev_io so need to look at each and determine if it's
* the last one for its bdev_io or not.
*/
bdev_io = (struct spdk_bdev_io *)dequeued_ops[i]->sym->m_src->userdata;
bdev_io = (struct spdk_bdev_io *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *);
assert(bdev_io != NULL);
io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
@@ -591,7 +607,7 @@ crypto_dev_poller(void *args)
/* Return the associated src and dst mbufs by collecting them into
* an array that we can use the bulk API to free after the loop.
*/
dequeued_ops[i]->sym->m_src->userdata = NULL;
*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0;
mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
if (dequeued_ops[i]->sym->m_dst) {
mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
@@ -762,7 +778,7 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
src_mbufs[crypto_index]->buf_iova = spdk_vtophys((void *)current_iov, &updated_length);
src_mbufs[crypto_index]->next = NULL;
/* Store context in every mbuf as we don't know anything about completion order */
src_mbufs[crypto_index]->userdata = bdev_io;
*RTE_MBUF_DYNFIELD(src_mbufs[crypto_index], g_mbuf_offset, uint64_t *) = (uint64_t)bdev_io;
/* Set the IV - we use the LBA of the crypto_op */
iv_ptr = rte_crypto_op_ctod_offset(crypto_ops[crypto_index], uint8_t *,

View File

@@ -301,6 +301,9 @@ DEFINE_STUB_V(spdk_reduce_vol_destroy, (struct spdk_reduce_backing_dev *backing_
spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
/* DPDK stubs */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op));
@@ -503,8 +506,8 @@ rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op
CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
}
CU_ASSERT(op->m_src->userdata == ut_expected_op.m_src->userdata);
CU_ASSERT(*RTE_MBUF_DYNFIELD(op->m_src, g_mbuf_offset, uint64_t *) ==
*RTE_MBUF_DYNFIELD(ut_expected_op.m_src, g_mbuf_offset, uint64_t *));
CU_ASSERT(op->src.offset == ut_expected_op.src.offset);
CU_ASSERT(op->src.length == ut_expected_op.src.length);
@@ -604,6 +607,7 @@ test_setup(void)
g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1];
}
g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL;
g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
return 0;
}
@@ -722,7 +726,7 @@ test_compress_operation(void)
ut_expected_op.m_src = exp_src_mbuf[0];
for (i = 0; i < UT_MBUFS_PER_OP; i++) {
exp_src_mbuf[i]->userdata = &cb_arg;
*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&cb_arg;
exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
@@ -781,7 +785,7 @@ test_compress_operation_cross_boundary(void)
ut_expected_op.m_src = exp_src_mbuf[0];
for (i = 0; i < UT_MBUFS_PER_OP; i++) {
exp_src_mbuf[i]->userdata = &cb_arg;
*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&cb_arg;
exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
@@ -804,7 +808,7 @@ test_compress_operation_cross_boundary(void)
g_small_size_counter = 0;
g_small_size_modify = 1;
g_small_size = 0x800;
exp_src_mbuf[3]->userdata = &cb_arg;
*RTE_MBUF_DYNFIELD(exp_src_mbuf[3], g_mbuf_offset, uint64_t *) = (uint64_t)&cb_arg;
/* first only has shorter length */
exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;
@@ -922,7 +926,7 @@ test_poller(void)
*/
ut_rte_compressdev_dequeue_burst = 1;
/* setup what we want dequeue to return for the op */
g_comp_op[0].m_src->userdata = (void *)cb_args;
*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
g_comp_op[0].produced = 1;
g_comp_op[0].status = 1;
/* value asserted in the reduce callback */
@@ -936,10 +940,10 @@ test_poller(void)
*/
ut_rte_compressdev_dequeue_burst = 2;
/* setup what we want dequeue to return for the op */
g_comp_op[0].m_src->userdata = (void *)cb_args;
*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
g_comp_op[0].produced = 16;
g_comp_op[0].status = 0;
g_comp_op[1].m_src->userdata = (void *)cb_args;
*RTE_MBUF_DYNFIELD(g_comp_op[1].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
g_comp_op[1].produced = 32;
g_comp_op[1].status = 0;
/* value asserted in the reduce callback */
@@ -955,7 +959,7 @@ test_poller(void)
*/
ut_rte_compressdev_dequeue_burst = 1;
/* setup what we want dequeue to return for the op */
g_comp_op[0].m_src->userdata = (void *)cb_args;
*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
g_comp_op[0].produced = 16;
g_comp_op[0].status = 0;
/* value asserted in the reduce callback */
@@ -1111,6 +1115,7 @@ test_initdrivers(void)
ut_rte_compressdev_private_xform_create = 0;
rc = vbdev_init_compress_drivers();
CU_ASSERT(rc == 0);
CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
spdk_mempool_free((struct spdk_mempool *)g_mbuf_mp);
}

View File

@@ -39,6 +39,7 @@
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#define MAX_TEST_BLOCKS 8192
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
@@ -164,6 +165,9 @@ DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
/* DPDK stubs */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
@@ -342,6 +346,8 @@ test_setup(void)
memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
}
g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
return 0;
}
@@ -439,7 +445,8 @@ test_simple_write(void)
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
@@ -468,7 +475,8 @@ test_simple_read(void)
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
@@ -502,7 +510,8 @@ test_large_rw(void)
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
}
@@ -527,7 +536,8 @@ test_large_rw(void)
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
@@ -571,7 +581,7 @@ test_dev_full(void)
CU_ASSERT(sym_op->m_src->next == NULL);
CU_ASSERT(sym_op->cipher.data.length == 512);
CU_ASSERT(sym_op->cipher.data.offset == 0);
CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(sym_op->m_dst == NULL);
/* make sure one got queued and confirm its values */
@@ -586,7 +596,7 @@ test_dev_full(void)
CU_ASSERT(sym_op->m_src->next == NULL);
CU_ASSERT(sym_op->cipher.data.length == 512);
CU_ASSERT(sym_op->cipher.data.offset == 0);
CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(sym_op->m_dst == NULL);
CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
@@ -632,7 +642,8 @@ test_crazy_rw(void)
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
@@ -666,7 +677,8 @@ test_crazy_rw(void)
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)g_bdev_io);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
@@ -838,6 +850,7 @@ test_initdrivers(void)
MOCK_CLEARED_ASSERT(spdk_mempool_create);
MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
rc = vbdev_crypto_init_crypto_drivers();
CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
init_cleanup();
CU_ASSERT(rc == 0);
@@ -916,7 +929,8 @@ test_poller(void)
g_dequeue_mock = g_enqueue_mock = 1;
spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) = (uintptr_t)g_bdev_io;
g_test_crypto_ops[0]->sym->m_dst = NULL;
g_io_ctx->cryop_cnt_remaining = 1;
g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
@@ -946,11 +960,13 @@ test_poller(void)
g_io_ctx->cryop_cnt_remaining = 2;
spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) = (uint64_t)g_bdev_io;
g_test_crypto_ops[0]->sym->m_dst = NULL;
g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
g_test_crypto_ops[1]->sym->m_src->userdata = g_bdev_io;
*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
uint64_t *) = (uint64_t)g_bdev_io;
g_test_crypto_ops[1]->sym->m_dst = NULL;
g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
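
A note on the unit-test changes above: the tests run without an initialized
DPDK environment, so rte_mbuf_dynfield_register() is stubbed to return a fixed
offset into the mbuf's own dynfield1[] scratch area (DPDK_DYNFIELD_OFFSET), and
test_setup() assigns the same value to g_mbuf_offset. RTE_MBUF_DYNFIELD() then
dereferences real storage inside the stubbed mbufs. Below is a minimal,
self-contained sketch of that idea, assuming a DPDK version whose struct
rte_mbuf has the dynfield1[] area; the names DEMO_DYNFIELD_OFFSET,
fake_dynfield_register and demo_dynfield_roundtrip are illustrative and not
part of the commit.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Point the "registered" offset at storage that already exists in the mbuf. */
#define DEMO_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])

/* Stand-in for rte_mbuf_dynfield_register(); always "succeeds". */
static int
fake_dynfield_register(const struct rte_mbuf_dynfield *params)
{
        (void)params;
        return DEMO_DYNFIELD_OFFSET;
}

static void
demo_dynfield_roundtrip(void)
{
        struct rte_mbuf m;
        uint64_t ctx = 0x1234;
        int offset = fake_dynfield_register(NULL);

        memset(&m, 0, sizeof(m));
        /* Same access pattern the driver code uses with a real offset. */
        *RTE_MBUF_DYNFIELD(&m, offset, uint64_t *) = ctx;
        assert(*RTE_MBUF_DYNFIELD(&m, offset, uint64_t *) == ctx);
}

Returning offsetof(struct rte_mbuf, dynfield1[1]) keeps the tests independent
of whatever offset a real registration would hand back, while still exercising
the exact RTE_MBUF_DYNFIELD() accesses the driver code performs.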