nvme: move I/O qpair allocation to transport
This requires a couple of related changes:

- I/O queue IDs are now allocated by using a bit array of free queue IDs
  instead of keeping an array of pre-initialized qpair structures.
- The "create I/O qpair" function has been split into two: one to create
  the queue pair at startup, and one to reinitialize an existing qpair
  structure after a reset.

Change-Id: I4ff3bf79b40130044428516f233b07c839d1b548
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
parent 1bbe0c00bd
commit c194ebd833
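Before the diff itself, a minimal sketch of the new bookkeeping may help. It uses only the spdk_bit_array calls that appear in the hunks below (spdk_bit_array_create/clear/set/find_first_set); the wrapper functions and their names are illustrative, not part of SPDK:

#include "spdk/bit_array.h"

/* Illustrative only: QID 0 is the admin queue, so I/O queue IDs run 1..num_io_queues. */
static struct spdk_bit_array *
sketch_init_free_qids(uint32_t num_io_queues)
{
	struct spdk_bit_array *free_io_qids;
	uint32_t i;

	free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	if (free_io_qids == NULL) {
		return NULL;
	}
	spdk_bit_array_clear(free_io_qids, 0);		/* QID 0 is never handed out */
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(free_io_qids, i);	/* mark every I/O QID as free */
	}
	return free_io_qids;
}

static uint32_t
sketch_alloc_qid(struct spdk_bit_array *free_io_qids, uint32_t num_io_queues)
{
	/* Lowest free I/O queue ID at or above 1; a value past the end means none are free. */
	uint32_t qid = spdk_bit_array_find_first_set(free_io_qids, 1);

	if (qid > num_io_queues) {
		return 0;				/* caller treats 0 as allocation failure */
	}
	spdk_bit_array_clear(free_io_qids, qid);	/* mark the ID as in use */
	return qid;
}

static void
sketch_free_qid(struct spdk_bit_array *free_io_qids, uint32_t qid)
{
	spdk_bit_array_set(free_io_qids, qid);		/* return the ID to the pool */
}

This mirrors what nvme_ctrlr_set_num_qpairs(), spdk_nvme_ctrlr_alloc_io_qpair() and spdk_nvme_ctrlr_free_io_qpair() do in the hunks below, with the qpair structure itself now allocated and freed by the transport rather than drawn from a preallocated array.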
@@ -105,6 +105,7 @@ struct spdk_nvme_qpair *
 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 			       enum spdk_nvme_qprio qprio)
 {
+	uint32_t qid;
 	struct spdk_nvme_qpair *qpair;
 	union spdk_nvme_cc_register cc;
 
@@ -130,31 +131,22 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 	pthread_mutex_lock(&ctrlr->ctrlr_lock);
 
 	/*
-	 * Get the first available qpair structure.
+	 * Get the first available I/O queue ID.
 	 */
-	qpair = TAILQ_FIRST(&ctrlr->free_io_qpairs);
-	if (qpair == NULL) {
-		/* No free queue IDs */
+	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
+	if (qid > ctrlr->opts.num_io_queues) {
+		SPDK_TRACELOG(SPDK_TRACE_NVME, "No free I/O queue IDs\n");
 		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
 		return NULL;
 	}
 
-	/*
-	 * At this point, qpair contains a preallocated submission and completion queue and a
-	 * unique queue ID, but it is not yet created on the controller.
-	 *
-	 * Fill out the submission queue priority and send out the Create I/O Queue commands.
-	 */
-	qpair->qprio = qprio;
-	if (ctrlr->transport->ctrlr_create_io_qpair(ctrlr, qpair)) {
-		/*
-		 * ctrlr_create_io_qpair() failed, so the qpair structure is still unused.
-		 * Exit here so we don't insert it into the active_io_qpairs list.
-		 */
+	qpair = ctrlr->transport->ctrlr_create_io_qpair(ctrlr, qid, qprio);
+	if (qpair == NULL) {
+		SPDK_TRACELOG(SPDK_TRACE_NVME, "transport->ctrlr_create_io_qpair() failed\n");
 		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
 		return NULL;
 	}
-	TAILQ_REMOVE(&ctrlr->free_io_qpairs, qpair, tailq);
+	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
 
 	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -175,14 +167,14 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
 
 	pthread_mutex_lock(&ctrlr->ctrlr_lock);
 
+	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
+	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
+
 	if (ctrlr->transport->ctrlr_delete_io_qpair(ctrlr, qpair)) {
 		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -1;
 	}
 
-	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
-	TAILQ_INSERT_HEAD(&ctrlr->free_io_qpairs, qpair, tailq);
-
 	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
 	return 0;
 }
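From an application's point of view, the allocate/free API in the two hunks above is unchanged. A hedged usage sketch of the calling pattern — ctrlr is assumed to be an already-attached controller, and the I/O submission in the middle is elided:

#include "spdk/nvme.h"

/* Usage sketch only: allocate one I/O qpair, use it, and give it back. */
static int
do_one_io_qpair(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_qpair *qpair;

	/* The transport allocates and creates the qpair; NULL means no free
	 * queue ID was available or queue creation failed on the controller. */
	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, SPDK_NVME_QPRIO_URGENT);
	if (qpair == NULL) {
		return -1;
	}

	/* ... submit I/O and poll completions on qpair ... */

	/* Deletes the queues on the controller, frees the qpair, and returns
	 * its queue ID to the free pool. */
	return spdk_nvme_ctrlr_free_io_qpair(qpair);
}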
@@ -324,74 +316,15 @@ nvme_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
				  ctrlr);
 }
 
-static int
-nvme_ctrlr_construct_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
-{
-	struct spdk_nvme_qpair *qpair;
-	union spdk_nvme_cap_register cap;
-	uint32_t i, num_entries;
-	int rc;
-	uint64_t phys_addr = 0;
-
-	if (ctrlr->ioq != NULL) {
-		/*
-		 * io_qpairs were already constructed, so just return.
-		 * This typically happens when the controller is
-		 * initialized a second (or subsequent) time after a
-		 * controller reset.
-		 */
-		return 0;
-	}
-
-	if (nvme_ctrlr_get_cap(ctrlr, &cap)) {
-		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cap() failed\n");
-		return -EIO;
-	}
-
-	/*
-	 * NVMe spec sets a hard limit of 64K max entries, but
-	 * devices may specify a smaller limit, so we need to check
-	 * the MQES field in the capabilities register.
-	 */
-	num_entries = nvme_min(NVME_IO_ENTRIES, cap.bits.mqes + 1);
-
-	ctrlr->ioq = spdk_zmalloc(ctrlr->opts.num_io_queues * sizeof(struct spdk_nvme_qpair),
-				  64, &phys_addr);
-
-	if (ctrlr->ioq == NULL)
-		return -1;
-
-	for (i = 0; i < ctrlr->opts.num_io_queues; i++) {
-		qpair = &ctrlr->ioq[i];
-
-		/*
-		 * Admin queue has ID=0. IO queues start at ID=1 -
-		 * hence the 'i+1' here.
-		 */
-		rc = nvme_qpair_construct(qpair,
-					  i + 1, /* qpair ID */
-					  num_entries,
-					  ctrlr);
-		if (rc)
-			return -1;
-
-		TAILQ_INSERT_TAIL(&ctrlr->free_io_qpairs, qpair, tailq);
-	}
-
-	return 0;
-}
-
 static void
 nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
 {
-	uint32_t i;
+	struct spdk_nvme_qpair *qpair;
 
 	ctrlr->is_failed = true;
 	nvme_qpair_fail(&ctrlr->adminq);
-	if (ctrlr->ioq) {
-		for (i = 0; i < ctrlr->opts.num_io_queues; i++) {
-			nvme_qpair_fail(&ctrlr->ioq[i]);
-		}
+	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
+		nvme_qpair_fail(qpair);
 	}
 }
 
@@ -532,7 +465,6 @@ int
 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 {
 	int rc = 0;
-	uint32_t i;
 	struct spdk_nvme_qpair *qpair;
 
 	pthread_mutex_lock(&ctrlr->ctrlr_lock);
@@ -553,8 +485,8 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 
 	/* Disable all queues before disabling the controller hardware. */
 	nvme_qpair_disable(&ctrlr->adminq);
-	for (i = 0; i < ctrlr->opts.num_io_queues; i++) {
-		nvme_qpair_disable(&ctrlr->ioq[i]);
+	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
+		nvme_qpair_disable(qpair);
 	}
 
 	/* Set the state back to INIT to cause a full hardware reset. */
@@ -572,7 +504,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 	if (!ctrlr->is_failed) {
 		/* Reinitialize qpairs */
 		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
-			if (ctrlr->transport->ctrlr_create_io_qpair(ctrlr, qpair) != 0) {
+			if (ctrlr->transport->ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
				nvme_ctrlr_fail(ctrlr);
				rc = -1;
			}
@@ -626,6 +558,7 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
 	struct nvme_completion_poll_status status;
 	int cq_allocated, sq_allocated;
 	int rc;
+	uint32_t i;
 
 	status.done = false;
 
@@ -662,6 +595,17 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
 
 	ctrlr->opts.num_io_queues = nvme_min(sq_allocated, cq_allocated);
 
+	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
+	if (ctrlr->free_io_qids == NULL) {
+		return -ENOMEM;
+	}
+
+	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
+	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
+	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
+		spdk_bit_array_set(ctrlr->free_io_qids, i);
+	}
+
 	return 0;
 }
 
@@ -958,10 +902,6 @@ nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
 		return -1;
 	}
 
-	if (nvme_ctrlr_construct_io_qpairs(ctrlr)) {
-		return -1;
-	}
-
 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
 		return -1;
 	}
@@ -1005,6 +945,7 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
 
 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
 	ctrlr->flags = 0;
+	ctrlr->free_io_qids = NULL;
 
 	if (nvme_ctrlr_get_cap(ctrlr, &cap)) {
 		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cap() failed\n");
@@ -1020,7 +961,6 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
 	ctrlr->is_resetting = false;
 	ctrlr->is_failed = false;
 
-	TAILQ_INIT(&ctrlr->free_io_qpairs);
 	TAILQ_INIT(&ctrlr->active_io_qpairs);
 
 	pthread_mutex_init_recursive(&ctrlr->ctrlr_lock);
@@ -1031,24 +971,18 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
 void
 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 {
-	uint32_t i;
 
 	while (!TAILQ_EMPTY(&ctrlr->active_io_qpairs)) {
 		struct spdk_nvme_qpair *qpair = TAILQ_FIRST(&ctrlr->active_io_qpairs);
 
 		spdk_nvme_ctrlr_free_io_qpair(qpair);
+		nvme_qpair_destroy(qpair);
 	}
 
 	nvme_ctrlr_shutdown(ctrlr);
 
 	nvme_ctrlr_destruct_namespaces(ctrlr);
-	if (ctrlr->ioq) {
-		for (i = 0; i < ctrlr->opts.num_io_queues; i++) {
-			nvme_qpair_destroy(&ctrlr->ioq[i]);
-		}
-	}
-
-	spdk_free(ctrlr->ioq);
+	spdk_bit_array_free(&ctrlr->free_io_qids);
 
 	nvme_qpair_destroy(&ctrlr->adminq);
 
@@ -52,6 +52,7 @@
 
 #include "spdk/queue.h"
 #include "spdk/barrier.h"
+#include "spdk/bit_array.h"
 #include "spdk/log.h"
 #include "spdk/mmio.h"
 #include "spdk/pci_ids.h"
@@ -260,8 +261,10 @@ struct spdk_nvme_transport {
 	int (*ctrlr_get_reg_4)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
 	int (*ctrlr_get_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
 
-	int (*ctrlr_create_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
+	struct spdk_nvme_qpair *(*ctrlr_create_io_qpair)(struct spdk_nvme_ctrlr *ctrlr,
+			uint16_t qid, enum spdk_nvme_qprio qprio);
 	int (*ctrlr_delete_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
+	int (*ctrlr_reinit_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
 
 	int (*qpair_construct)(struct spdk_nvme_qpair *qpair);
 	void (*qpair_destroy)(struct spdk_nvme_qpair *qpair);
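With the callback table above, a transport now owns qpair memory end to end: ctrlr_create_io_qpair allocates and returns the qpair for a given queue ID, ctrlr_reinit_io_qpair re-creates the hardware queues for an existing qpair after a controller reset, and ctrlr_delete_io_qpair tears the queues down and frees the structure. A skeletal, hypothetical wiring of the three callbacks — the my_* names are purely illustrative, and the real PCIe implementation appears later in this diff (assumes the definitions from this header plus stdlib.h):

/* Hypothetical transport glue; only the qpair-related callbacks are sketched. */
static struct spdk_nvme_qpair *
my_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
			 enum spdk_nvme_qprio qprio)
{
	struct spdk_nvme_qpair *qpair = calloc(1, sizeof(*qpair));

	if (qpair == NULL) {
		return NULL;
	}
	qpair->id = qid;
	qpair->qprio = qprio;
	/* transport-specific submission/completion queue setup would go here */
	return qpair;
}

static int
my_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	/* re-create the hardware queues for qpair->id after a reset; return 0 on success */
	return 0;
}

static int
my_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);	/* the transport frees what it allocated */
	return 0;
}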
@@ -419,9 +422,6 @@ struct spdk_nvme_ctrlr {
 
 	const struct spdk_nvme_transport *transport;
 
-	/** I/O queue pairs */
-	struct spdk_nvme_qpair *ioq;
-
 	/** Array of namespaces indexed by nsid - 1 */
 	struct spdk_nvme_ns *ns;
 
@@ -479,7 +479,7 @@ struct spdk_nvme_ctrlr {
 	 */
 	struct spdk_nvme_ns_data *nsdata;
 
-	TAILQ_HEAD(, spdk_nvme_qpair) free_io_qpairs;
+	struct spdk_bit_array *free_io_qids;
 	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
 
 	struct spdk_nvme_ctrlr_opts opts;
@@ -785,13 +785,11 @@ nvme_pcie_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 }
 
 static int
-nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+_nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
+				 uint16_t qid)
 {
 	struct nvme_completion_poll_status status;
+	int rc;
 
-	assert(ctrlr != NULL);
-	assert(qpair != NULL);
-	int rc;
-
 	status.done = false;
 	rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
@@ -835,6 +833,60 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
 	return 0;
 }
 
+static struct spdk_nvme_qpair *
+nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
+				enum spdk_nvme_qprio qprio)
+{
+	struct spdk_nvme_qpair *qpair;
+	union spdk_nvme_cap_register cap;
+	uint32_t num_entries;
+	int rc;
+
+	assert(ctrlr != NULL);
+
+	if (nvme_ctrlr_get_cap(ctrlr, &cap)) {
+		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cap() failed\n");
+		return NULL;
+	}
+
+	qpair = calloc(1, sizeof(*qpair));
+	if (qpair == NULL) {
+		return NULL;
+	}
+
+	qpair->id = qid;
+	qpair->qprio = qprio;
+
+	/*
+	 * NVMe spec sets a hard limit of 64K max entries, but
+	 * devices may specify a smaller limit, so we need to check
+	 * the MQES field in the capabilities register.
+	 */
+	num_entries = nvme_min(NVME_IO_ENTRIES, cap.bits.mqes + 1);
+
+	rc = nvme_qpair_construct(qpair, qid, num_entries, ctrlr);
+	if (rc != 0) {
+		free(qpair);
+		return NULL;
+	}
+
+	rc = _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qid);
+
+	if (rc != 0) {
+		SPDK_TRACELOG(SPDK_TRACE_NVME, "I/O queue creation failed\n");
+		free(qpair);
+		return NULL;
+	}
+
+	return qpair;
+}
+
+static int
+nvme_pcie_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+	return _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qpair->id);
+}
+
 static int
 nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
@@ -870,6 +922,8 @@ nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
 		return -1;
 	}
 
+	free(qpair);
+
 	return 0;
 }
 
@@ -1242,6 +1296,7 @@ const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
 
 	.ctrlr_create_io_qpair = nvme_pcie_ctrlr_create_io_qpair,
 	.ctrlr_delete_io_qpair = nvme_pcie_ctrlr_delete_io_qpair,
+	.ctrlr_reinit_io_qpair = nvme_pcie_ctrlr_reinit_io_qpair,
 
 	.qpair_construct = nvme_pcie_qpair_construct,
 	.qpair_destroy = nvme_pcie_qpair_destroy,
@@ -40,7 +40,8 @@ C_SRCS = $(TEST_FILE) $(OTHER_FILES)
 CFLAGS += -I$(SPDK_ROOT_DIR)/lib
 CFLAGS += -I$(SPDK_ROOT_DIR)/test
 
-SPDK_LIBS += $(SPDK_ROOT_DIR)/lib/log/libspdk_log.a
+SPDK_LIBS += $(SPDK_ROOT_DIR)/lib/util/libspdk_util.a \
+	     $(SPDK_ROOT_DIR)/lib/log/libspdk_log.a
 
 LIBS += -lcunit $(SPDK_LIBS)
 
@@ -115,15 +115,25 @@ ut_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *val
 }
 
 
-static int
-ut_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+static struct spdk_nvme_qpair *
+ut_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, enum spdk_nvme_qprio qprio)
 {
-	return 0;
+	struct spdk_nvme_qpair *qpair;
+
+	qpair = calloc(1, sizeof(*qpair));
+	SPDK_CU_ASSERT_FATAL(qpair != NULL);
+
+	qpair->ctrlr = ctrlr;
+	qpair->id = qid;
+	qpair->qprio = qprio;
+
+	return qpair;
 }
 
 static int
 ut_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
+	free(qpair);
 	return 0;
 }
 
@@ -882,13 +892,21 @@ test_nvme_ctrlr_init_en_0_rdy_1(void)
 static void
 setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
 {
+	uint32_t i;
+
-	CU_ASSERT_FATAL(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);
 	ctrlr->transport = &nvme_ctrlr_ut_transport;
 
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
+
 	/* Fake out the parts of ctrlr needed for I/O qpair allocation */
 	ctrlr->opts.num_io_queues = num_io_queues;
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct_io_qpairs(ctrlr) == 0);
+	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
+	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);
+
+	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
+	for (i = 1; i <= num_io_queues; i++) {
+		spdk_bit_array_set(ctrlr->free_io_qids, i);
+	}
 }
 
 static void
@@ -221,7 +221,7 @@ prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
 {
 	memset(ctrlr, 0, sizeof(*ctrlr));
 	ctrlr->transport = &nvme_qpair_ut_transport;
-	TAILQ_INIT(&ctrlr->free_io_qpairs);
+	ctrlr->free_io_qids = NULL;
 	TAILQ_INIT(&ctrlr->active_io_qpairs);
 	nvme_qpair_construct(qpair, 1, 128, ctrlr);
 