test/nvme: move nvme_pcie unit tests to a new file

The PCIe-specific unit tests still need to be updated; this patch just
moves the existing tests over and stubs out the necessary external
functions.

Change-Id: Ia6d46013231d8880df111b744523d02b56b9b37a
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Daniel Verkamp <daniel.verkamp@intel.com>, 2017-01-05 16:12:54 -07:00 (committed by Jim Harris)
parent f80c0f4fdd
commit f3c45ea0b3
7 changed files with 705 additions and 384 deletions
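For context, the new test file follows the usual SPDK unit-test pattern that the commit message alludes to: the implementation under test is compiled directly into the test binary, and every external function it references is given a stub definition, here an abort() so that any call path the test does not expect fails immediately instead of silently returning garbage. A condensed sketch of that pattern, drawn from the new nvme_pcie_ut.c shown in the diff below:

#include "spdk_cunit.h"
#include "lib/nvme/unit/test_env.c"	/* shared unit-test environment */
#include "nvme/nvme_pcie.c"		/* code under test, compiled in place */

/* Every external dependency of nvme_pcie.c gets a stub; abort() makes
 * an unexpected call fail loudly. */
void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	abort();
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio)
{
	abort();
}

As the PCIe-specific tests are reworked, individual abort() stubs can be replaced with fakes that record arguments or return canned values, without touching the code under test.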

@@ -18,6 +18,7 @@ $valgrind $testdir/unit/nvme_c/nvme_ut
$valgrind $testdir/unit/nvme_qpair_c/nvme_qpair_ut
$valgrind $testdir/unit/nvme_ctrlr_c/nvme_ctrlr_ut
$valgrind $testdir/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut
+$valgrind $testdir/unit/nvme_pcie_c/nvme_pcie_ut
timing_exit unit
if [ $RUN_NIGHTLY -eq 1 ]; then

@@ -35,7 +35,7 @@ SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = nvme_c nvme_ns_cmd_c nvme_qpair_c nvme_ctrlr_c \
-nvme_ctrlr_cmd_c nvme_quirks_c
+nvme_ctrlr_cmd_c nvme_pcie_c nvme_quirks_c
.PHONY: all clean $(DIRS-y)

@@ -0,0 +1 @@
nvme_pcie_ut

@@ -0,0 +1,38 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = nvme_pcie_ut.c
include $(SPDK_ROOT_DIR)/mk/nvme.unittest.mk

@@ -0,0 +1,663 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdbool.h>
#include <stdlib.h>
#include "spdk_cunit.h"
#include "lib/nvme/unit/test_env.c"
#include "nvme/nvme_pcie.c"
struct spdk_trace_flag SPDK_TRACE_NVME = {
.name = "nvme",
.enabled = false,
};
static struct nvme_driver _g_nvme_driver = {
.lock = PTHREAD_MUTEX_INITIALIZER,
.request_mempool = NULL,
};
struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
int32_t spdk_nvme_retry_count = 1;
struct nvme_request *g_request = NULL;
extern bool ut_fail_vtophys;
bool fail_next_sge = false;
struct io_request {
uint64_t address_offset;
bool invalid_addr;
bool invalid_second_addr;
};
void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
abort();
}
int
spdk_uevent_connect(void)
{
abort();
}
int
spdk_get_uevent(int fd, struct spdk_uevent *uevent)
{
abort();
}
struct spdk_pci_id
spdk_pci_device_get_id(struct spdk_pci_device *dev)
{
abort();
}
int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
struct spdk_nvme_ctrlr *ctrlr,
enum spdk_nvme_qprio qprio)
{
abort();
}
int
spdk_pci_nvme_enumerate(spdk_pci_enum_cb enum_cb, void *enum_ctx)
{
abort();
}
int
spdk_pci_nvme_device_attach(spdk_pci_enum_cb enum_cb, void *enum_ctx,
struct spdk_pci_addr *pci_address)
{
abort();
}
void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
abort();
}
int
spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
abort();
}
int
spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
{
abort();
}
struct spdk_pci_addr
spdk_pci_device_get_addr(struct spdk_pci_device *dev)
{
abort();
}
int
spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value, uint32_t offset)
{
abort();
}
int
spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value, uint32_t offset)
{
abort();
}
int
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
{
abort();
}
void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
abort();
}
int
nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
abort();
}
void
nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
{
abort();
}
int
nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid, void *devhandle,
spdk_nvme_probe_cb probe_cb, void *cb_ctx)
{
abort();
}
int
nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
{
abort();
}
void
nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap)
{
abort();
}
uint64_t
nvme_get_quirks(const struct spdk_pci_id *id)
{
abort();
}
void
nvme_free_request(struct nvme_request *req)
{
abort();
}
bool
nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl)
{
abort();
}
void
nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd)
{
abort();
}
void
nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl)
{
abort();
}
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
abort();
}
int
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
{
abort();
}
struct nvme_request *
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
abort();
}
void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
abort();
}
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
abort();
}
void
nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
{
abort();
}
#if 0 /* TODO: update PCIe-specific unit test */
static void
nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
struct io_request *req = (struct io_request *)cb_arg;
req->address_offset = 0;
req->invalid_addr = false;
req->invalid_second_addr = false;
switch (sgl_offset) {
case 0:
req->invalid_addr = false;
break;
case 1:
req->invalid_addr = true;
break;
case 2:
req->invalid_addr = false;
req->invalid_second_addr = true;
break;
default:
break;
}
return;
}
static int
nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
struct io_request *req = (struct io_request *)cb_arg;
if (req->address_offset == 0) {
if (req->invalid_addr) {
*address = (void *)7;
} else {
*address = (void *)(4096 * req->address_offset);
}
} else if (req->address_offset == 1) {
if (req->invalid_second_addr) {
*address = (void *)7;
} else {
*address = (void *)(4096 * req->address_offset);
}
} else {
*address = (void *)(4096 * req->address_offset);
}
req->address_offset += 1;
*length = 4096;
if (fail_next_sge) {
return - 1;
} else {
return 0;
}
}
static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
struct spdk_nvme_ctrlr *ctrlr)
{
memset(ctrlr, 0, sizeof(*ctrlr));
ctrlr->free_io_qids = NULL;
TAILQ_INIT(&ctrlr->active_io_qpairs);
TAILQ_INIT(&ctrlr->active_procs);
nvme_qpair_init(qpair, 1, ctrlr, 0);
ut_fail_vtophys = false;
}
static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
}
static void
ut_insert_cq_entry(struct spdk_nvme_qpair *qpair, uint32_t slot)
{
struct nvme_request *req;
struct nvme_tracker *tr;
struct spdk_nvme_cpl *cpl;
req = spdk_mempool_get(_g_nvme_driver.request_mempool);
SPDK_CU_ASSERT_FATAL(req != NULL);
memset(req, 0, sizeof(*req));
tr = TAILQ_FIRST(&qpair->free_tr);
TAILQ_REMOVE(&qpair->free_tr, tr, tq_list); /* remove tr from free_tr */
TAILQ_INSERT_HEAD(&qpair->outstanding_tr, tr, tq_list);
req->cmd.cid = tr->cid;
tr->req = req;
qpair->tr[tr->cid].active = true;
cpl = &qpair->cpl[slot];
cpl->status.p = qpair->phase;
cpl->cid = tr->cid;
}
static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}
static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}
static void
test4(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {};
char payload[4096];
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(req != NULL);
/* Force vtophys to return a failure. This should
* result in the nvme_qpair manually failing
* the request with error status to signify
* a bad payload buffer.
*/
ut_fail_vtophys = true;
CU_ASSERT(qpair.sq_tail == 0);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
}
static void
test_sgl_req(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_payload payload = {};
struct nvme_tracker *sgl_tr = NULL;
uint64_t i;
struct io_request io_req = {};
payload.type = NVME_PAYLOAD_TYPE_SGL;
payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
payload.u.sgl.next_sge_fn = nvme_request_next_sge;
payload.u.sgl.cb_arg = &io_req;
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 7 | 0;
req->payload_offset = 1;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 7 | 0;
spdk_nvme_retry_count = 1;
fail_next_sge = true;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
fail_next_sge = false;
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 15 | 0;
req->payload_offset = 2;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 4095 | 0;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
CU_ASSERT(qpair.sq_tail == 1);
sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
if (sgl_tr != NULL) {
for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
}
TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
}
cleanup_submit_request_test(&qpair);
nvme_free_request(req);
}
static void
test_hw_sgl_req(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_payload payload = {};
struct nvme_tracker *sgl_tr = NULL;
uint64_t i;
struct io_request io_req = {};
payload.type = NVME_PAYLOAD_TYPE_SGL;
payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
payload.u.sgl.next_sge_fn = nvme_request_next_sge;
payload.u.sgl.cb_arg = &io_req;
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 7 | 0;
req->payload_offset = 0;
ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
nvme_qpair_submit_request(&qpair, req);
sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
CU_ASSERT(sgl_tr != NULL);
CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
cleanup_submit_request_test(&qpair);
nvme_free_request(req);
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 2023 | 0;
req->payload_offset = 0;
ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
nvme_qpair_submit_request(&qpair, req);
sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
CU_ASSERT(sgl_tr != NULL);
for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
}
CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
cleanup_submit_request_test(&qpair);
nvme_free_request(req);
}
static void test_nvme_qpair_fail(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req = NULL;
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_tracker *tr_temp;
prepare_submit_request_test(&qpair, &ctrlr);
tr_temp = TAILQ_FIRST(&qpair.free_tr);
SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
tr_temp->req->cmd.cid = tr_temp->cid;
TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
nvme_qpair_fail(&qpair);
CU_ASSERT_TRUE(TAILQ_EMPTY(&qpair.outstanding_tr));
req = nvme_allocate_request_null(expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(req != NULL);
STAILQ_INSERT_HEAD(&qpair.queued_req, req, stailq);
nvme_qpair_fail(&qpair);
CU_ASSERT_TRUE(STAILQ_EMPTY(&qpair.queued_req));
cleanup_submit_request_test(&qpair);
}
static void
test_nvme_qpair_process_completions_limit(void)
{
struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_ctrlr ctrlr = {};
prepare_submit_request_test(&qpair, &ctrlr);
qpair.is_enabled = true;
/* Insert 4 entries into the completion queue */
CU_ASSERT(qpair.cq_head == 0);
ut_insert_cq_entry(&qpair, 0);
ut_insert_cq_entry(&qpair, 1);
ut_insert_cq_entry(&qpair, 2);
ut_insert_cq_entry(&qpair, 3);
/* This should only process 2 completions, and 2 should be left in the queue */
spdk_nvme_qpair_process_completions(&qpair, 2);
CU_ASSERT(qpair.cq_head == 2);
/* This should only process 1 completion, and 1 should be left in the queue */
spdk_nvme_qpair_process_completions(&qpair, 1);
CU_ASSERT(qpair.cq_head == 3);
/* This should process the remaining completion */
spdk_nvme_qpair_process_completions(&qpair, 5);
CU_ASSERT(qpair.cq_head == 4);
cleanup_submit_request_test(&qpair);
}
static void test_nvme_qpair_destroy(void)
{
struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_tracker *tr_temp;
memset(&ctrlr, 0, sizeof(ctrlr));
TAILQ_INIT(&ctrlr.free_io_qpairs);
TAILQ_INIT(&ctrlr.active_io_qpairs);
TAILQ_INIT(&ctrlr.active_procs);
nvme_qpair_init(&qpair, 1, 128, &ctrlr);
nvme_qpair_destroy(&qpair);
nvme_qpair_init(&qpair, 0, 128, &ctrlr);
tr_temp = TAILQ_FIRST(&qpair.free_tr);
SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
tr_temp->req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
tr_temp->req->cmd.cid = tr_temp->cid;
TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
nvme_qpair_destroy(&qpair);
CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding_tr));
}
#endif
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
unsigned int num_failures;
if (CU_initialize_registry() != CUE_SUCCESS) {
return CU_get_error();
}
suite = CU_add_suite("nvme_pcie", NULL, NULL);
if (suite == NULL) {
CU_cleanup_registry();
return CU_get_error();
}
#if 0
if (CU_add_test(suite, "test3", test3) == NULL
|| CU_add_test(suite, "test4", test4) == NULL
) {
CU_cleanup_registry();
return CU_get_error();
}
#endif
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
num_failures = CU_get_number_of_failures();
CU_cleanup_registry();
return num_failures;
}

@@ -31,8 +31,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/uio.h>
#include <stdbool.h>
#include "spdk_cunit.h"
@@ -49,77 +47,6 @@ struct nvme_driver _g_nvme_driver = {
.request_mempool = NULL,
};
int32_t spdk_nvme_retry_count = 1;
struct nvme_request *g_request = NULL;
extern bool ut_fail_vtophys;
bool fail_next_sge = false;
struct io_request {
uint64_t address_offset;
bool invalid_addr;
bool invalid_second_addr;
};
#if 0 /* TODO: move to PCIe-specific unit test */
static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
struct io_request *req = (struct io_request *)cb_arg;
req->address_offset = 0;
req->invalid_addr = false;
req->invalid_second_addr = false;
switch (sgl_offset) {
case 0:
req->invalid_addr = false;
break;
case 1:
req->invalid_addr = true;
break;
case 2:
req->invalid_addr = false;
req->invalid_second_addr = true;
break;
default:
break;
}
return;
}
static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
struct io_request *req = (struct io_request *)cb_arg;
if (req->address_offset == 0) {
if (req->invalid_addr) {
*address = (void *)7;
} else {
*address = (void *)(4096 * req->address_offset);
}
} else if (req->address_offset == 1) {
if (req->invalid_second_addr) {
*address = (void *)7;
} else {
*address = (void *)(4096 * req->address_offset);
}
} else {
*address = (void *)(4096 * req->address_offset);
}
req->address_offset += 1;
*length = 4096;
if (fail_next_sge) {
return - 1;
} else {
return 0;
}
}
#endif
struct nvme_request *
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn,
@@ -224,8 +151,6 @@ prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
TAILQ_INIT(&ctrlr->active_io_qpairs);
TAILQ_INIT(&ctrlr->active_procs);
nvme_qpair_init(qpair, 1, ctrlr, 0);
ut_fail_vtophys = false;
}
static void
@@ -233,31 +158,6 @@ cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
}
#if 0 /* TODO: move to PCIe-specific unit test */
static void
ut_insert_cq_entry(struct spdk_nvme_qpair *qpair, uint32_t slot)
{
struct nvme_request *req;
struct nvme_tracker *tr;
struct spdk_nvme_cpl *cpl;
req = spdk_mempool_get(_g_nvme_driver.request_mempool);
SPDK_CU_ASSERT_FATAL(req != NULL);
memset(req, 0, sizeof(*req));
tr = TAILQ_FIRST(&qpair->free_tr);
TAILQ_REMOVE(&qpair->free_tr, tr, tq_list); /* remove tr from free_tr */
TAILQ_INSERT_HEAD(&qpair->outstanding_tr, tr, tq_list);
req->cmd.cid = tr->cid;
tr->req = req;
qpair->tr[tr->cid].active = true;
cpl = &qpair->cpl[slot];
cpl->status.p = qpair->phase;
cpl->cid = tr->cid;
}
#endif
static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
@@ -289,178 +189,6 @@ test3(void)
cleanup_submit_request_test(&qpair);
}
#if 0 /* TODO: move to PCIe-specific unit test */
static void
test4(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {};
char payload[4096];
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(req != NULL);
/* Force vtophys to return a failure. This should
* result in the nvme_qpair manually failing
* the request with error status to signify
* a bad payload buffer.
*/
ut_fail_vtophys = true;
CU_ASSERT(qpair.sq_tail == 0);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
}
static void
test_sgl_req(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_payload payload = {};
struct nvme_tracker *sgl_tr = NULL;
uint64_t i;
struct io_request io_req = {};
payload.type = NVME_PAYLOAD_TYPE_SGL;
payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
payload.u.sgl.next_sge_fn = nvme_request_next_sge;
payload.u.sgl.cb_arg = &io_req;
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 7 | 0;
req->payload_offset = 1;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 7 | 0;
spdk_nvme_retry_count = 1;
fail_next_sge = true;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
fail_next_sge = false;
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 15 | 0;
req->payload_offset = 2;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 4095 | 0;
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
CU_ASSERT(qpair.sq_tail == 1);
sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
if (sgl_tr != NULL) {
for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
}
TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
}
cleanup_submit_request_test(&qpair);
nvme_free_request(req);
}
static void
test_hw_sgl_req(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_payload payload = {};
struct nvme_tracker *sgl_tr = NULL;
uint64_t i;
struct io_request io_req = {};
payload.type = NVME_PAYLOAD_TYPE_SGL;
payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
payload.u.sgl.next_sge_fn = nvme_request_next_sge;
payload.u.sgl.cb_arg = &io_req;
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 7 | 0;
req->payload_offset = 0;
ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
nvme_qpair_submit_request(&qpair, req);
sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
CU_ASSERT(sgl_tr != NULL);
CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
cleanup_submit_request_test(&qpair);
nvme_free_request(req);
prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE;
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 2023 | 0;
req->payload_offset = 0;
ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
nvme_qpair_submit_request(&qpair, req);
sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
CU_ASSERT(sgl_tr != NULL);
for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
}
CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
cleanup_submit_request_test(&qpair);
nvme_free_request(req);
}
#endif
static void
test_ctrlr_failed(void)
{
@@ -494,39 +222,6 @@ static void struct_packing(void)
CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}
#if 0 /* TODO: move to PCIe-specific unit test */
static void test_nvme_qpair_fail(void)
{
struct spdk_nvme_qpair qpair = {};
struct nvme_request *req = NULL;
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_tracker *tr_temp;
prepare_submit_request_test(&qpair, &ctrlr);
tr_temp = TAILQ_FIRST(&qpair.free_tr);
SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
tr_temp->req->cmd.cid = tr_temp->cid;
TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
nvme_qpair_fail(&qpair);
CU_ASSERT_TRUE(TAILQ_EMPTY(&qpair.outstanding_tr));
req = nvme_allocate_request_null(expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(req != NULL);
STAILQ_INSERT_HEAD(&qpair.queued_req, req, stailq);
nvme_qpair_fail(&qpair);
CU_ASSERT_TRUE(STAILQ_EMPTY(&qpair.queued_req));
cleanup_submit_request_test(&qpair);
}
#endif
static void test_nvme_qpair_process_completions(void)
{
struct spdk_nvme_qpair qpair = {};
@@ -539,69 +234,6 @@ static void test_nvme_qpair_process_completions(void)
cleanup_submit_request_test(&qpair);
}
#if 0 /* TODO: move to PCIe-specific unit test */
static void
test_nvme_qpair_process_completions_limit(void)
{
struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_ctrlr ctrlr = {};
prepare_submit_request_test(&qpair, &ctrlr);
qpair.is_enabled = true;
/* Insert 4 entries into the completion queue */
CU_ASSERT(qpair.cq_head == 0);
ut_insert_cq_entry(&qpair, 0);
ut_insert_cq_entry(&qpair, 1);
ut_insert_cq_entry(&qpair, 2);
ut_insert_cq_entry(&qpair, 3);
/* This should only process 2 completions, and 2 should be left in the queue */
spdk_nvme_qpair_process_completions(&qpair, 2);
CU_ASSERT(qpair.cq_head == 2);
/* This should only process 1 completion, and 1 should be left in the queue */
spdk_nvme_qpair_process_completions(&qpair, 1);
CU_ASSERT(qpair.cq_head == 3);
/* This should process the remaining completion */
spdk_nvme_qpair_process_completions(&qpair, 5);
CU_ASSERT(qpair.cq_head == 4);
cleanup_submit_request_test(&qpair);
}
static void test_nvme_qpair_destroy(void)
{
struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_tracker *tr_temp;
memset(&ctrlr, 0, sizeof(ctrlr));
TAILQ_INIT(&ctrlr.free_io_qpairs);
TAILQ_INIT(&ctrlr.active_io_qpairs);
TAILQ_INIT(&ctrlr.active_procs);
nvme_qpair_init(&qpair, 1, 128, &ctrlr);
nvme_qpair_destroy(&qpair);
nvme_qpair_init(&qpair, 0, 128, &ctrlr);
tr_temp = TAILQ_FIRST(&qpair.free_tr);
SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
tr_temp->req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
tr_temp->req->cmd.cid = tr_temp->cid;
TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
nvme_qpair_destroy(&qpair);
CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding_tr));
}
#endif
static void test_nvme_completion_is_retry(void)
{
struct spdk_nvme_cpl cpl = {};
@@ -739,28 +371,13 @@ int main(int argc, char **argv)
}
if (CU_add_test(suite, "test3", test3) == NULL
#if 0
|| CU_add_test(suite, "test4", test4) == NULL
#endif
|| CU_add_test(suite, "ctrlr_failed", test_ctrlr_failed) == NULL
|| CU_add_test(suite, "struct_packing", struct_packing) == NULL
#if 0
|| CU_add_test(suite, "nvme_qpair_fail", test_nvme_qpair_fail) == NULL
#endif
|| CU_add_test(suite, "spdk_nvme_qpair_process_completions",
test_nvme_qpair_process_completions) == NULL
#if 0
|| CU_add_test(suite, "spdk_nvme_qpair_process_completions_limit",
test_nvme_qpair_process_completions_limit) == NULL
|| CU_add_test(suite, "nvme_qpair_destroy", test_nvme_qpair_destroy) == NULL
#endif
|| CU_add_test(suite, "nvme_completion_is_retry", test_nvme_completion_is_retry) == NULL
#ifdef DEBUG
|| CU_add_test(suite, "get_status_string", test_get_status_string) == NULL
#endif
#if 0
|| CU_add_test(suite, "sgl_request", test_sgl_req) == NULL
|| CU_add_test(suite, "hw_sgl_request", test_hw_sgl_req) == NULL
#endif
) {
CU_cleanup_registry();

@@ -9,6 +9,7 @@ test/lib/nvme/unit/nvme_ctrlr_c/nvme_ctrlr_ut
test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut
test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut
test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut
+test/lib/nvme/unit/nvme_pcie_c/nvme_pcie_ut
test/lib/ioat/unit/ioat_ut