test/unit: add rdma unittest file.

Includes tests for the spdk_nvmf_rdma_request_parse_sgl() function; a condensed sketch of the test's structure follows the change summary below.

Change-Id: I83a854598c7320b31b75a4fa5ebbfe66cb708b6d
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/429070
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Authored by Seth Howell, 2018-10-11 16:21:05 -07:00; committed by Jim Harris
parent 4b88ed5687
commit d6c0c192fb
6 changed files with 305 additions and 4 deletions
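
For orientation: the new rdma_ut.c compiles the nvmf transport source directly into the test binary and replaces its external dependencies with stubs, so spdk_nvmf_rdma_request_parse_sgl() can be exercised without real RDMA hardware. Below is an editor's condensed sketch of that pattern, not standalone code; it assumes the SPDK unit-test build environment, and every name in it is taken from the full rdma_ut.c listing further down.

/*
 * Condensed sketch of the isolation pattern used by rdma_ut.c (illustrative only).
 */
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"   /* environment mocks and stub helpers */
#include "nvmf/rdma.c"             /* compile the code under test into this binary */

/* 1. Stub each external symbol rdma.c links against, e.g.: */
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));

/* 2. Keep global knobs that the mocks consult, so individual cases can steer them: */
uint64_t g_mr_size;                /* size reported by the mocked spdk_mem_map_translate() */

/* 3. In each test case, hand-build an spdk_nvmf_rdma_request whose command points at an
 *    SGL descriptor (keyed data block, or in-capsule offset), run it through
 *    spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req), and CU_ASSERT
 *    on the resulting request length, buffers, and scatter-gather entries.
 */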

test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c

@@ -192,12 +192,12 @@ test_nvme_rdma_build_sgl_request(void)
rmap.map = map;
ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
ctrlr.cdata.nvmf_specific.msdbd = 16;
rqpair.mr_map = &rmap;
rqpair.qpair.ctrlr = &ctrlr;
rqpair.cmds = &cmd;
cmd.sgl[0].address = 0x1111;
rdma_req.id = 0;
rdma_req.req = &req;
@@ -252,10 +252,10 @@ test_nvme_rdma_build_sgl_request(void)
CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
}
- /* Test case 3: Multiple SGL, SGL larger than mr size. Expected: FAIL */
+ /* Test case 3: Multiple SGL, SGL 2X mr size. Expected: FAIL */
bio.iovpos = 0;
req.payload_offset = 0;
- g_mr_size = 0x500;
+ g_mr_size = 0x800;
rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
SPDK_CU_ASSERT_FATAL(rc != 0);
CU_ASSERT(bio.iovpos == 1);

test/unit/lib/nvmf/Makefile

@@ -36,6 +36,8 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = request.c ctrlr.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
+ DIRS-$(CONFIG_RDMA) += rdma.c
.PHONY: all clean $(DIRS-y)
all: $(DIRS-y)

test/unit/lib/nvmf/rdma.c/.gitignore (new file)

@@ -0,0 +1 @@
rdma_ut

test/unit/lib/nvmf/rdma.c/Makefile (new file)

@@ -0,0 +1,38 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = rdma_ut.c
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

test/unit/lib/nvmf/rdma.c/rdma_ut.c (new file)

@@ -0,0 +1,257 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/rdma.c"
uint64_t g_mr_size;
struct ibv_mr g_rdma_mr;
#define RDMA_UT_UNITS_IN_MAX_IO 16
struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
.max_io_size = (SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
.io_unit_size = SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE,
.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
};
SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
uint8_t arg1_is_ptr, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
if (g_mr_size != 0) {
*(uint32_t *)size = g_mr_size;
}
return (uint64_t)&g_rdma_mr;
}
static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
int i;
rdma_req->req.length = 0;
rdma_req->data_from_pool = false;
rdma_req->req.data = NULL;
rdma_req->data.wr.num_sge = 0;
rdma_req->data.wr.wr.rdma.remote_addr = 0;
rdma_req->data.wr.wr.rdma.rkey = 0;
for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
rdma_req->req.iov[i].iov_base = 0;
rdma_req->req.iov[i].iov_len = 0;
rdma_req->data.buffers[i] = 0;
rdma_req->data.wr.sg_list[i].addr = 0;
rdma_req->data.wr.sg_list[i].length = 0;
rdma_req->data.wr.sg_list[i].lkey = 0;
}
}
static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
struct spdk_nvmf_rdma_transport rtransport;
struct spdk_nvmf_rdma_device device;
struct spdk_nvmf_rdma_request rdma_req;
struct spdk_nvmf_rdma_recv recv;
union nvmf_c2h_msg cpl;
union nvmf_h2c_msg cmd;
struct spdk_nvme_sgl_descriptor *sgl;
int rc, i;
sgl = &cmd.nvme_cmd.dptr.sgl1;
rdma_req.recv = &recv;
rdma_req.req.cmd = &cmd;
rdma_req.req.rsp = &cpl;
rdma_req.data.wr.sg_list = rdma_req.data.sgl;
rtransport.transport.opts = g_rdma_ut_transport_opts;
device.attr.device_cap_flags = 0;
g_rdma_mr.lkey = 0xABCD;
sgl->keyed.key = 0xEEEE;
sgl->address = 0xFFFF;
rdma_req.recv->buf = (void *)0xDDDD;
/* Test 1: sgl type: keyed data block subtype: address */
sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
MOCK_SET(spdk_mempool_get, (void *)0x2000);
reset_nvmf_rdma_request(&rdma_req);
sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
CU_ASSERT(rc == 0);
CU_ASSERT(rdma_req.data_from_pool == true);
CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
CU_ASSERT(rdma_req.data.wr.num_sge == 1);
CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
CU_ASSERT((uint64_t)rdma_req.data.buffers[0] == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
reset_nvmf_rdma_request(&rdma_req);
sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
CU_ASSERT(rc == 0);
CU_ASSERT(rdma_req.data_from_pool == true);
CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
CU_ASSERT((uint64_t)rdma_req.data.buffers[i] == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
}
/* Part 3: simple I/O one SGL larger than the transport max io size */
reset_nvmf_rdma_request(&rdma_req);
sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
CU_ASSERT(rc == -1);
/* Part 4: Pretend there are no buffer pools */
MOCK_SET(spdk_mempool_get, NULL);
reset_nvmf_rdma_request(&rdma_req);
sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
CU_ASSERT(rc == 0);
CU_ASSERT(rdma_req.data_from_pool == false);
CU_ASSERT(rdma_req.req.data == NULL);
CU_ASSERT(rdma_req.data.wr.num_sge == 0);
CU_ASSERT(rdma_req.data.buffers[0] == NULL);
CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
rdma_req.recv->buf = (void *)0xDDDD;
/* Test 2: sgl type: keyed data block subtype: offset (in capsule data) */
sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
/* Part 1: Normal I/O smaller than in capsule data size no offset */
reset_nvmf_rdma_request(&rdma_req);
sgl->address = 0;
sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
CU_ASSERT(rc == 0);
CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
CU_ASSERT(rdma_req.data_from_pool == false);
/* Part 2: I/O offset + length too large */
reset_nvmf_rdma_request(&rdma_req);
sgl->address = rtransport.transport.opts.in_capsule_data_size;
sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
CU_ASSERT(rc == -1);
/* Part 3: I/O too large */
reset_nvmf_rdma_request(&rdma_req);
sgl->address = 0;
sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
CU_ASSERT(rc == -1);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
unsigned int num_failures;
if (CU_initialize_registry() != CUE_SUCCESS) {
return CU_get_error();
}
suite = CU_add_suite("nvmf", NULL, NULL);
if (suite == NULL) {
CU_cleanup_registry();
return CU_get_error();
}
if (
CU_add_test(suite, "test_parse_sgl", test_spdk_nvmf_rdma_request_parse_sgl) == NULL) {
CU_cleanup_registry();
return CU_get_error();
}
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
num_failures = CU_get_number_of_failures();
CU_cleanup_registry();
return num_failures;
}

test/unit/unittest.sh

@@ -87,7 +87,7 @@ $valgrind $testdir/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut
$valgrind $testdir/lib/nvme/nvme_qpair.c/nvme_qpair_ut
$valgrind $testdir/lib/nvme/nvme_pcie.c/nvme_pcie_ut
$valgrind $testdir/lib/nvme/nvme_quirks.c/nvme_quirks_ut
- if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/config.h; then
+ if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
$valgrind $testdir/lib/nvme/nvme_rdma.c/nvme_rdma_ut
fi
@@ -104,6 +104,9 @@ $valgrind $testdir/lib/log/log.c/log_ut
$valgrind $testdir/lib/nvmf/ctrlr.c/ctrlr_ut
$valgrind $testdir/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut
$valgrind $testdir/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut
+ if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
+ $valgrind $testdir/lib/nvmf/rdma.c/rdma_ut
+ fi
$valgrind $testdir/lib/nvmf/request.c/request_ut
$valgrind $testdir/lib/nvmf/subsystem.c/subsystem_ut