bdev/crypto: Switched to pkt_mbuf API

- Switched to using rte_mempool for mbufs instead of spdk_mempool. This
  allows using the rte pkt_mbuf API, which properly handles the mbuf
  fields needed by mlx5, so we no longer have to set them manually when
  sending crypto ops (see the sketch below the commit metadata).
- Use rte_mempool *g_mbuf_mp in the vbdev crypto unit tests and add the
  mocking API code.
- Update crypto_ut to follow the pkt_mbuf API rules.

Signed-off-by: Yuriy Umanets <yumanets@nvidia.com>
Change-Id: Ia5576c672ac2eebb260bfdbb528ddb9edcd8f036
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11623
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Author: Yuriy Umanets, 2022-01-21 10:02:42 +02:00
Committed by: Tomasz Zawadzki
Commit: a837ea37da (parent: 03d9c67139)
2 changed files with 194 additions and 73 deletions
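
For readers skimming the diff below, here is a condensed, illustrative sketch of the core change: the old code populated raw mbuf fields by hand, while the pkt_mbuf API attaches the external buffer and maintains the related bookkeeping itself. Names and helper functions here are simplified from the diff, not the exact patched code.

```c
#include <rte_mbuf.h>

/* Shared-info block required by rte_pktmbuf_attach_extbuf(); its free_cb
 * is a no-op in this module because the bdev layer owns the buffers. */
static struct rte_mbuf_ext_shared_info g_shinfo_sketch;

/* Before: manual field setup on an mbuf drawn from an spdk_mempool. Any
 * pkt_mbuf-internal field not listed here (refcnt, data_off, ol_flags...)
 * was left stale, which drivers such as mlx5 can trip over. */
static void
prepare_mbuf_old(struct rte_mbuf *m, void *buf, uint64_t iova, uint16_t len)
{
	m->buf_addr = buf;
	m->buf_iova = iova;
	m->data_len = len;
	m->next = NULL;
}

/* After: the pkt_mbuf API does the same wiring plus the bookkeeping. */
static void
prepare_mbuf_new(struct rte_mbuf *m, void *buf, uint64_t iova, uint16_t len)
{
	rte_pktmbuf_attach_extbuf(m, buf, iova, len, &g_shinfo_sketch);
	rte_pktmbuf_append(m, len);
}
```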

File: vbdev_crypto.c

@@ -191,9 +191,11 @@ static TAILQ_HEAD(, vbdev_crypto) g_vbdev_crypto = TAILQ_HEAD_INITIALIZER(g_vbde
 /* Shared mempools between all devices on this system */
 static struct rte_mempool *g_session_mp = NULL;
 static struct rte_mempool *g_session_mp_priv = NULL;
-static struct spdk_mempool *g_mbuf_mp = NULL; /* mbuf mempool */
+static struct rte_mempool *g_mbuf_mp = NULL; /* mbuf mempool */
 static struct rte_mempool *g_crypto_op_mp = NULL; /* crypto operations, must be rte* mempool */
+static struct rte_mbuf_ext_shared_info g_shinfo = {}; /* used by DPDK mbuf macro */
 /* For queueing up crypto operations that we can't submit for some reason */
 struct vbdev_crypto_op {
 uint8_t cdev_id;
@@ -377,6 +379,12 @@ err:
 return rc;
 }
+/* Dummy function used by DPDK to free ext attached buffers to mbufs, we free them ourselves but
+ * this callback has to be here. */
+static void shinfo_free_cb(void *arg1, void *arg2)
+{
+}
 /* This is called from the module's init function. We setup all crypto devices early on as we are unable
 * to easily dynamically configure queue pairs after the drivers are up and running. So, here, we
 * configure the max capabilities of each device and assign threads to queue pairs as channels are
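
When an mbuf created via attach_extbuf is freed, DPDK detaches the external buffer and, once the shared-info refcount drops to zero, invokes shinfo->free_cb; the empty callback above satisfies that contract without freeing anything, since buffer lifetime is managed by the bdev layer. A hedged sketch of the full shared-info setup DPDK documents (the module itself only assigns free_cb, as the init hunk further down shows):

```c
#include <rte_mbuf.h>

static void
sketch_free_cb(void *addr, void *opaque)
{
	/* Intentionally empty: the bdev layer owns and frees the buffers. */
}

static struct rte_mbuf_ext_shared_info g_sketch_shinfo;

static void
sketch_shinfo_init(void)
{
	g_sketch_shinfo.free_cb = sketch_free_cb;
	g_sketch_shinfo.fcb_opaque = NULL;
	/* DPDK expects the ext-buf refcount initialized before first attach. */
	rte_mbuf_ext_refcnt_set(&g_sketch_shinfo, 1);
}
```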
@@ -452,9 +460,8 @@ vbdev_crypto_init_crypto_drivers(void)
 return -ENOMEM;
 }
-g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
-SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
-SPDK_ENV_SOCKET_ID_ANY);
+g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS, POOL_CACHE_SIZE,
+0, 0, SPDK_ENV_SOCKET_ID_ANY);
 if (g_mbuf_mp == NULL) {
 SPDK_ERRLOG("Cannot create mbuf pool\n");
 rc = -ENOMEM;
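
rte_pktmbuf_pool_create() takes the mbuf count, per-lcore cache size, per-mbuf private area size, and data room size. Passing 0 for the last two works here because every payload is attached as an external buffer rather than copied into the mbuf. A minimal sketch of the call; the constants are illustrative, not the module's actual NUM_MBUFS/POOL_CACHE_SIZE values, and the module passes SPDK_ENV_SOCKET_ID_ANY rather than DPDK's SOCKET_ID_ANY:

```c
#include <rte_mbuf.h>

static struct rte_mempool *
sketch_create_mbuf_pool(void)
{
	/* 8192 mbufs, 256-deep per-lcore cache, no private area, and no
	 * data room: payloads come from rte_pktmbuf_attach_extbuf(). */
	return rte_pktmbuf_pool_create("sketch_mbuf_mp", 8192, 256,
				       0, 0, SOCKET_ID_ANY);
}
```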
@@ -493,6 +500,7 @@ vbdev_crypto_init_crypto_drivers(void)
 dev_qp->index = i++;
 }
+g_shinfo.free_cb = shinfo_free_cb;
 return 0;
 /* Error cleanup paths. */
@@ -504,7 +512,7 @@ err:
 rte_mempool_free(g_crypto_op_mp);
 g_crypto_op_mp = NULL;
 error_create_op:
-spdk_mempool_free(g_mbuf_mp);
+rte_mempool_free(g_mbuf_mp);
 g_mbuf_mp = NULL;
 error_create_mbuf:
 rte_mempool_free(g_session_mp);
@@ -587,7 +595,7 @@ crypto_dev_poller(void *args)
 struct spdk_bdev_io *bdev_io = NULL;
 struct crypto_bdev_io *io_ctx = NULL;
 struct rte_crypto_op *dequeued_ops[MAX_DEQUEUE_BURST_SIZE];
-struct rte_crypto_op *mbufs_to_free[2 * MAX_DEQUEUE_BURST_SIZE];
+struct rte_mbuf *mbufs_to_free[2 * MAX_DEQUEUE_BURST_SIZE];
 int num_mbufs = 0;
 struct vbdev_crypto_op *op_to_resubmit;
@@ -652,9 +660,7 @@ crypto_dev_poller(void *args)
 (void **)dequeued_ops,
 num_dequeued_ops);
 assert(num_mbufs > 0);
-spdk_mempool_put_bulk(g_mbuf_mp,
-(void **)mbufs_to_free,
-num_mbufs);
+rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
 }
 /* Check if there are any pending crypto ops to process */
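
Each completed crypto op carries one mbuf for reads (m_src) and two for writes (m_src plus m_dst), which is why mbufs_to_free is sized at twice the dequeue burst. A sketch of the collect-then-free pattern the poller uses; the helper name and the 64-op burst bound are illustrative:

```c
#include <rte_crypto.h>
#include <rte_mbuf.h>

static void
sketch_free_completed_op_mbufs(struct rte_crypto_op **ops, uint16_t n_ops)
{
	struct rte_mbuf *mbufs_to_free[2 * 64]; /* assumes n_ops <= 64 */
	int num_mbufs = 0;
	uint16_t i;

	for (i = 0; i < n_ops; i++) {
		mbufs_to_free[num_mbufs++] = ops[i]->sym->m_src;
		if (ops[i]->sym->m_dst != NULL) {
			mbufs_to_free[num_mbufs++] = ops[i]->sym->m_dst;
		}
	}
	/* One call returns every mbuf to its pool. */
	rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
}
```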
@@ -710,7 +716,7 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
 uint32_t allocated = 0;
 uint8_t *current_iov = NULL;
 uint64_t total_remaining = 0;
-uint64_t updated_length, current_iov_remaining = 0;
+uint64_t current_iov_remaining = 0;
 uint32_t crypto_index = 0;
 uint32_t en_offset = 0;
 struct rte_crypto_op *crypto_ops[MAX_ENQUEUE_ARRAY_SIZE];
@@ -727,7 +733,7 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
 * LBA as IV, there can be no case where we'd need >1 mbuf per crypto op or the
 * op would be > 1 LBA.
 */
-rc = spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], cryop_cnt);
+rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, cryop_cnt);
 if (rc) {
 SPDK_ERRLOG("ERROR trying to get src_mbufs!\n");
 return -ENOMEM;
@@ -735,7 +741,7 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
 /* Get the same amount but these buffers to describe the encrypted data location (dst). */
 if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
-rc = spdk_mempool_get_bulk(g_mbuf_mp, (void **)&dst_mbufs[0], cryop_cnt);
+rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, cryop_cnt);
 if (rc) {
 SPDK_ERRLOG("ERROR trying to get dst_mbufs!\n");
 rc = -ENOMEM;
@@ -787,17 +793,26 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
 current_iov_remaining = bdev_io->u.bdev.iovs[iov_index].iov_len;
 do {
 uint8_t *iv_ptr;
+uint8_t *buf_addr;
+uint64_t phys_addr;
 uint64_t op_block_offset;
+uint64_t phys_len;
-/* Set the mbuf elements address and length. Null out the next pointer. */
-src_mbufs[crypto_index]->buf_addr = current_iov;
-src_mbufs[crypto_index]->data_len = updated_length = crypto_len;
-/* TODO: Make this assignment conditional on QAT usage and add an assert. */
-src_mbufs[crypto_index]->buf_iova = spdk_vtophys((void *)current_iov, &updated_length);
-src_mbufs[crypto_index]->next = NULL;
 /* Store context in every mbuf as we don't know anything about completion order */
 *RTE_MBUF_DYNFIELD(src_mbufs[crypto_index], g_mbuf_offset, uint64_t *) = (uint64_t)bdev_io;
+phys_len = crypto_len;
+phys_addr = spdk_vtophys((void *)current_iov, &phys_len);
+if (phys_addr == SPDK_VTOPHYS_ERROR) {
+rc = -EFAULT;
+goto error_attach_session;
+}
+/* Set the mbuf elements address and length. */
+rte_pktmbuf_attach_extbuf(src_mbufs[crypto_index], current_iov,
+phys_addr, crypto_len, &g_shinfo);
+rte_pktmbuf_append(src_mbufs[crypto_index], crypto_len);
 /* Set the IV - we use the LBA of the crypto_op */
 iv_ptr = rte_crypto_op_ctod_offset(crypto_ops[crypto_index], uint8_t *,
 IV_OFFSET);
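
The "store context in every mbuf" line above relies on DPDK's dynamic mbuf fields: the module registers a field once at init (the unit test stubs rte_mbuf_dynfield_register with a fixed offset) and reads it back in the poller to recover the bdev_io regardless of completion order. A sketch of the registration/use pattern, with a hypothetical field name:

```c
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int g_ctx_offset_sketch;

static int
sketch_register_ctx_field(void)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = "sketch_bdev_io_ctx",   /* hypothetical name */
		.size = sizeof(uint64_t),
		.align = __alignof__(uint64_t),
	};

	/* Returns a byte offset into the mbuf, or negative on failure. */
	g_ctx_offset_sketch = rte_mbuf_dynfield_register(&field_desc);
	return (g_ctx_offset_sketch < 0) ? -1 : 0;
}

static void
sketch_store_ctx(struct rte_mbuf *m, void *bdev_io)
{
	*RTE_MBUF_DYNFIELD(m, g_ctx_offset_sketch, uint64_t *) = (uint64_t)bdev_io;
}
```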
@@ -811,27 +826,24 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
 /* link the mbuf to the crypto op. */
 crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];
-if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
-crypto_ops[crypto_index]->sym->m_dst = src_mbufs[crypto_index];
-} else {
-crypto_ops[crypto_index]->sym->m_dst = NULL;
-}
 /* For encrypt, point the destination to a buffer we allocate and redirect the bdev_io
 * that will be used to process the write on completion to the same buffer. Setting
 * up the en_buffer is a little simpler as we know the destination buffer is single IOV.
 */
 if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+buf_addr = io_ctx->aux_buf_iov.iov_base + en_offset;
+phys_addr = spdk_vtophys((void *)buf_addr, NULL);
+if (phys_addr == SPDK_VTOPHYS_ERROR) {
+rc = -EFAULT;
+goto error_attach_session;
+}
+rte_pktmbuf_attach_extbuf(dst_mbufs[crypto_index], buf_addr,
+phys_addr, crypto_len, &g_shinfo);
+rte_pktmbuf_append(dst_mbufs[crypto_index], crypto_len);
-/* Set the relevant destination en_mbuf elements. */
-dst_mbufs[crypto_index]->buf_addr = io_ctx->aux_buf_iov.iov_base + en_offset;
-dst_mbufs[crypto_index]->data_len = updated_length = crypto_len;
-/* TODO: Make this assignment conditional on QAT usage and add an assert. */
-dst_mbufs[crypto_index]->buf_iova = spdk_vtophys(dst_mbufs[crypto_index]->buf_addr,
-&updated_length);
 crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
 en_offset += crypto_len;
-dst_mbufs[crypto_index]->next = NULL;
 /* Attach the crypto session to the operation */
 rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index],
@@ -842,6 +854,8 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
 }
 } else {
+crypto_ops[crypto_index]->sym->m_dst = NULL;
 /* Attach the crypto session to the operation */
 rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index],
 io_ctx->crypto_bdev->session_decrypt);
@@ -926,16 +940,14 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
 error_attach_session:
 error_get_ops:
 if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
-spdk_mempool_put_bulk(g_mbuf_mp, (void **)&dst_mbufs[0],
-cryop_cnt);
+rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
 }
 if (allocated > 0) {
 rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops,
 allocated);
 }
 error_get_dst:
-spdk_mempool_put_bulk(g_mbuf_mp, (void **)&src_mbufs[0],
-cryop_cnt);
+rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt);
 return rc;
 }
@@ -1623,7 +1635,7 @@ vbdev_crypto_finish(void)
 }
 rte_mempool_free(g_crypto_op_mp);
-spdk_mempool_free(g_mbuf_mp);
+rte_mempool_free(g_mbuf_mp);
 rte_mempool_free(g_session_mp);
 if (g_session_mp_priv != NULL) {
 rte_mempool_free(g_session_mp_priv);

File: crypto_ut.c

@@ -55,6 +55,103 @@ int ut_rte_crypto_op_attach_sym_session = 0;
 int ut_rte_cryptodev_info_get = 0;
 bool ut_rte_cryptodev_info_get_mocked = false;
+void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
+#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
+void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
+{
+spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
+}
+void mock_rte_pktmbuf_free(struct rte_mbuf *m);
+#define rte_pktmbuf_free mock_rte_pktmbuf_free
+void mock_rte_pktmbuf_free(struct rte_mbuf *m)
+{
+spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
+}
+void rte_mempool_free(struct rte_mempool *mp)
+{
+spdk_mempool_free((struct spdk_mempool *)mp);
+}
+int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
+unsigned count);
+#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
+int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
+unsigned count)
+{
+int rc;
+rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
+if (rc) {
+return rc;
+}
+for (unsigned i = 0; i < count; i++) {
+rte_pktmbuf_reset(mbufs[i]);
+mbufs[i]->pool = pool;
+}
+return rc;
+}
+struct rte_mempool *
+rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
+uint32_t elt_size, uint32_t cache_size,
+uint16_t priv_size, int socket_id)
+{
+struct spdk_mempool *tmp;
+tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
+cache_size, socket_id);
+return (struct rte_mempool *)tmp;
+}
+struct rte_mempool *
+rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
+uint16_t priv_size, uint16_t data_room_size, int socket_id)
+{
+struct spdk_mempool *tmp;
+tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
+cache_size, socket_id);
+return (struct rte_mempool *)tmp;
+}
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+unsigned cache_size, unsigned private_data_size,
+rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+int socket_id, unsigned flags)
+{
+struct spdk_mempool *tmp;
+tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
+cache_size, socket_id);
+return (struct rte_mempool *)tmp;
+}
+DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+unsigned nb_elts, unsigned cache_size,
+uint16_t priv_size, int socket_id)
+{
+struct spdk_mempool *tmp;
+HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);
+tmp = spdk_mempool_create(name, nb_elts,
+sizeof(struct rte_crypto_op) + priv_size,
+cache_size, socket_id);
+return (struct rte_mempool *)tmp;
+}
 /* Those functions are defined as static inline in DPDK, so we can't
 * mock them straight away. We use defines to redirect them into
 * our custom functions.
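
The comment above describes the trick that makes the new mocks work: a static inline function cannot be overridden at link time, so the test #defines the DPDK name to a mock before the code under test is compiled. A hedged sketch of the pattern with a hypothetical function name; SPDK unit tests typically include the .c file under test directly, which is what makes the redirect take effect:

```c
/* The redirect must appear before the code under test is compiled in. */
struct rte_mbuf;

void mock_rte_pktmbuf_example(struct rte_mbuf *m);
#define rte_pktmbuf_example mock_rte_pktmbuf_example

void
mock_rte_pktmbuf_example(struct rte_mbuf *m)
{
	/* Test-controlled behavior replaces the static inline original. */
}

/* Hypothetical include of the file under test; every call site in it
 * now resolves to the mock via the #define above. */
#include "vbdev_crypto.c"
```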
@@ -169,25 +266,11 @@ DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
 DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
 DPDK_DYNFIELD_OFFSET);
 DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
-DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
-DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
-unsigned elt_size,
-unsigned cache_size, unsigned private_data_size,
-rte_mempool_ctor_t *mp_init, void *mp_init_arg,
-rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
-int socket_id, unsigned flags), (struct rte_mempool *)1);
 DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
-DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
-(const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
-unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
 DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
 DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
 DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
-DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name,
-uint32_t nb_elts,
-uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
-int socket_id), (struct rte_mempool *)1);
 DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
 DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
 DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
@@ -330,10 +413,9 @@ test_setup(void)
 TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
 /* Allocate a real mbuf pool so we can test error paths */
-g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
-SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
-SPDK_ENV_SOCKET_ID_ANY);
+g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS,
+(unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+0, 0, SPDK_ENV_SOCKET_ID_ANY);
 /* Instead of allocating real rte mempools for these, it's easier and provides the
 * same coverage just calloc them here.
 */
@@ -358,7 +440,24 @@ test_cleanup(void)
 {
 int i;
-spdk_mempool_free(g_mbuf_mp);
+if (g_crypto_op_mp) {
+rte_mempool_free(g_crypto_op_mp);
+g_crypto_op_mp = NULL;
+}
+if (g_mbuf_mp) {
+rte_mempool_free(g_mbuf_mp);
+g_mbuf_mp = NULL;
+}
+if (g_session_mp) {
+rte_mempool_free(g_session_mp);
+g_session_mp = NULL;
+}
+if (g_session_mp_priv != NULL) {
+/* g_session_mp_priv may or may not be set depending on the DPDK version */
+rte_mempool_free(g_session_mp_priv);
+g_session_mp_priv = NULL;
+}
 for (i = 0; i < MAX_TEST_BLOCKS; i++) {
 free(g_test_crypto_ops[i]);
 }
@@ -378,6 +477,7 @@ test_error_paths(void)
 g_bdev_io->u.bdev.iovcnt = 1;
 g_bdev_io->u.bdev.num_blocks = 1;
 g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
 g_crypto_bdev.crypto_bdev.blocklen = 512;
 g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
 g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
@@ -451,8 +551,8 @@ test_simple_write(void)
 CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
 CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
+rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
 }
 static void
@@ -480,7 +580,7 @@ test_simple_read(void)
 uint64_t *) == (uint64_t)g_bdev_io);
 CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
 }
 static void
@@ -514,7 +614,7 @@ test_large_rw(void)
 CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
 uint64_t *) == (uint64_t)g_bdev_io);
 CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
 }
 /* Multi block size write, multi-element */
@@ -545,8 +645,8 @@ test_large_rw(void)
 CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
 CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
 CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
 }
 }
@@ -600,8 +700,8 @@ test_dev_full(void)
 CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
 CU_ASSERT(sym_op->m_dst == NULL);
 CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);
 /* Non-busy reason for enqueue failure, all were rejected. */
 g_enqueue_mock = 0;
@@ -647,7 +747,7 @@ test_crazy_rw(void)
 uint64_t *) == (uint64_t)g_bdev_io);
 CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
 CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
 }
 /* Multi block size write, single element strange IOV makeup */
@@ -682,8 +782,8 @@ test_crazy_rw(void)
 uint64_t *) == (uint64_t)g_bdev_io);
 CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
 CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
-spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
+rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
 }
 }
@@ -728,13 +828,22 @@ test_reset(void)
 static void
 init_cleanup(void)
 {
-spdk_mempool_free(g_mbuf_mp);
-rte_mempool_free(g_session_mp);
-g_mbuf_mp = NULL;
-g_session_mp = NULL;
+if (g_crypto_op_mp) {
+rte_mempool_free(g_crypto_op_mp);
+g_crypto_op_mp = NULL;
+}
+if (g_mbuf_mp) {
+rte_mempool_free(g_mbuf_mp);
+g_mbuf_mp = NULL;
+}
+if (g_session_mp) {
+rte_mempool_free(g_session_mp);
+g_session_mp = NULL;
+}
 if (g_session_mp_priv != NULL) {
 /* g_session_mp_priv may or may not be set depending on the DPDK version */
 rte_mempool_free(g_session_mp_priv);
 g_session_mp_priv = NULL;
 }
 }
@@ -742,7 +851,7 @@ static void
 test_initdrivers(void)
 {
 int rc;
-static struct spdk_mempool *orig_mbuf_mp;
+static struct rte_mempool *orig_mbuf_mp;
 static struct rte_mempool *orig_session_mp;
 static struct rte_mempool *orig_session_mp_priv;
@@ -791,7 +900,7 @@ test_initdrivers(void)
 CU_ASSERT(g_mbuf_mp == NULL);
 CU_ASSERT(g_session_mp == NULL);
 CU_ASSERT(g_session_mp_priv == NULL);
-MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
+MOCK_CLEAR(rte_crypto_op_pool_create);
 /* Check resources are not sufficient */
 MOCK_CLEARED_ASSERT(spdk_mempool_create);
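
The switch from MOCK_SET to MOCK_CLEAR follows from rte_crypto_op_pool_create now being a DEFINE_RETURN_MOCK'd function (see the mock section above) rather than a plain stub: clearing the mock lets the real mock body run and create an actual pool, while MOCK_SET would short-circuit it with a canned pointer. A sketch of SPDK's return-mock pattern with a hypothetical function:

```c
#include "spdk_internal/mock.h"

/* Declares ut_sketch_fn plus a 'mocked' flag used by the macros below. */
DEFINE_RETURN_MOCK(sketch_fn, int);

int
sketch_fn(void)
{
	/* Returns ut_sketch_fn while MOCK_SET(sketch_fn, ...) is active. */
	HANDLE_RETURN_MOCK(sketch_fn);
	return 42; /* "real" behavior, reached after MOCK_CLEAR(sketch_fn) */
}

static void
sketch_usage(void)
{
	MOCK_SET(sketch_fn, -1);   /* sketch_fn() now returns -1 */
	MOCK_CLEAR(sketch_fn);     /* sketch_fn() returns 42 again */
}
```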
@@ -928,7 +1037,7 @@ test_poller(void)
 /* test regular 1 op to dequeue and complete */
 g_dequeue_mock = g_enqueue_mock = 1;
-spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
+rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
 g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
 *RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
 uint64_t *) = (uintptr_t)g_bdev_io;
@@ -959,7 +1068,7 @@ test_poller(void)
 /* 2 to dequeue but 2nd one failed */
 g_dequeue_mock = g_enqueue_mock = 2;
 g_io_ctx->cryop_cnt_remaining = 2;
-spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
+rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
 g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
 *RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
 uint64_t *) = (uint64_t)g_bdev_io;