nvmf: Replace transport controller with poll_group

For now, this is a name change of controller to poll_group
in the transport layer. Over time, the poll_group will
become a more general concept than a controller, allowing
for qpairs to be spread across cores.

Change-Id: Ia92a2934541ad336f462f73175d53aaaf021f67b
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/371775
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Ben Walker
Date: 2017-07-28 11:21:45 -07:00
Committed-by: Jim Harris
parent 6fb907323f
commit baa936a173
7 changed files with 131 additions and 113 deletions

View File

@@ -163,7 +163,8 @@ nvmf_init_nvme_ctrlr_properties(struct spdk_nvmf_ctrlr *ctrlr)
static void ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
TAILQ_REMOVE(&ctrlr->subsys->ctrlrs, ctrlr, link);
spdk_nvmf_transport_ctrlr_fini(ctrlr);
spdk_nvmf_transport_poll_group_destroy(ctrlr->group);
free(ctrlr);
}
void
@@ -288,13 +289,21 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
}
/* Establish a new ctrlr */
ctrlr = spdk_nvmf_transport_ctrlr_init(qpair->transport);
if (ctrlr == NULL) {
ctrlr = calloc(1, sizeof(*ctrlr));
if (!ctrlr) {
SPDK_ERRLOG("Memory allocation failure\n");
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return;
}
ctrlr->group = spdk_nvmf_transport_poll_group_create(qpair->transport);
if (ctrlr->group == NULL) {
SPDK_ERRLOG("Memory allocation failure\n");
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
free(ctrlr);
return;
}
TAILQ_INIT(&ctrlr->qpairs);
ctrlr->cntlid = spdk_nvmf_ctrlr_gen_cntlid();
@@ -302,6 +311,8 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
/* Unable to get a cntlid */
SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
spdk_nvmf_transport_poll_group_destroy(ctrlr->group);
free(ctrlr);
return;
}
@@ -313,9 +324,9 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
memcpy(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid));
if (spdk_nvmf_transport_ctrlr_add_qpair(ctrlr, qpair)) {
if (spdk_nvmf_transport_poll_group_add(ctrlr->group, qpair)) {
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
spdk_nvmf_transport_ctrlr_fini(ctrlr);
spdk_nvmf_transport_poll_group_destroy(ctrlr->group);
free(ctrlr);
return;
}
@@ -374,7 +385,7 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
return;
}
if (spdk_nvmf_transport_ctrlr_add_qpair(ctrlr, qpair)) {
if (spdk_nvmf_transport_poll_group_add(ctrlr->group, qpair)) {
INVALID_CONNECT_CMD(qid);
return;
}
@@ -399,7 +410,7 @@ spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_qpair *qpair)
ctrlr->num_qpairs--;
TAILQ_REMOVE(&ctrlr->qpairs, qpair, link);
spdk_nvmf_transport_ctrlr_remove_qpair(ctrlr, qpair);
spdk_nvmf_transport_poll_group_remove(ctrlr->group, qpair);
spdk_nvmf_transport_qpair_fini(qpair);
if (ctrlr->num_qpairs == 0) {

View File

@@ -92,7 +92,7 @@ struct spdk_nvmf_ctrlr {
} async_event_config;
struct spdk_nvmf_request *aer_req;
uint8_t hostid[16];
struct spdk_nvmf_transport *transport;
struct spdk_nvmf_poll_group *group;
TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};

View File

@@ -90,6 +90,11 @@ struct spdk_nvmf_tgt {
TAILQ_HEAD(, spdk_nvmf_transport) transports;
};
struct spdk_nvmf_poll_group {
struct spdk_nvmf_transport *transport;
TAILQ_ENTRY(spdk_nvmf_poll_group) link;
};
extern struct spdk_nvmf_tgt g_nvmf_tgt;
struct spdk_nvmf_listen_addr *spdk_nvmf_listen_addr_create(struct spdk_nvme_transport_id *trid);

View File

@@ -166,8 +166,8 @@ struct spdk_nvmf_rdma_qpair {
/* List of RDMA connections that have not yet received a CONNECT capsule */
static TAILQ_HEAD(, spdk_nvmf_rdma_qpair) g_pending_conns = TAILQ_HEAD_INITIALIZER(g_pending_conns);
struct spdk_nvmf_rdma_ctrlr {
struct spdk_nvmf_ctrlr ctrlr;
struct spdk_nvmf_rdma_poll_group {
struct spdk_nvmf_poll_group group;
SLIST_HEAD(, spdk_nvmf_rdma_buf) data_buf_pool;
@@ -718,11 +718,11 @@ typedef enum _spdk_nvmf_request_prep_type {
static spdk_nvmf_request_prep_type
spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
{
struct spdk_nvme_cmd *cmd;
struct spdk_nvme_cpl *rsp;
struct spdk_nvmf_rdma_request *rdma_req;
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvme_sgl_descriptor *sgl;
struct spdk_nvme_cmd *cmd;
struct spdk_nvme_cpl *rsp;
struct spdk_nvmf_rdma_request *rdma_req;
struct spdk_nvmf_rdma_poll_group *rgroup;
struct spdk_nvme_sgl_descriptor *sgl;
cmd = &req->cmd->nvme_cmd;
rsp = &req->rsp->nvme_cpl;
@@ -776,8 +776,7 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key;
rdma_req->data.wr.wr.rdma.remote_addr = sgl->address;
rdma_ctrlr = SPDK_CONTAINEROF(req->qpair->ctrlr, struct spdk_nvmf_rdma_ctrlr, ctrlr);
if (!rdma_ctrlr) {
if (!req->qpair->ctrlr) {
/* The only time a connection won't have a ctrlr
* is when this is the CONNECT request.
*/
@@ -792,8 +791,9 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
qpair)->bufs_mr->lkey;
rdma_req->data_from_pool = false;
} else {
req->data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool);
rdma_req->data.sgl[0].lkey = rdma_ctrlr->buf_mr->lkey;
rgroup = SPDK_CONTAINEROF(req->qpair->ctrlr->group, struct spdk_nvmf_rdma_poll_group, group);
req->data = SLIST_FIRST(&rgroup->data_buf_pool);
rdma_req->data.sgl[0].lkey = rgroup->buf_mr->lkey;
rdma_req->data_from_pool = true;
if (!req->data) {
/* No available buffers. Queue this request up. */
@@ -804,7 +804,7 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
}
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Request %p took buffer from central pool\n", req);
SLIST_REMOVE_HEAD(&rdma_ctrlr->data_buf_pool, link);
SLIST_REMOVE_HEAD(&rgroup->data_buf_pool, link);
}
rdma_req->data.sgl[0].addr = (uintptr_t)req->data;
@@ -857,24 +857,24 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
static int
spdk_nvmf_rdma_handle_pending_rdma_rw(struct spdk_nvmf_qpair *qpair)
{
struct spdk_nvmf_rdma_qpair *rdma_qpair;
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_request *rdma_req, *tmp;
int rc;
int count = 0;
struct spdk_nvmf_rdma_qpair *rdma_qpair;
struct spdk_nvmf_rdma_poll_group *rgroup;
struct spdk_nvmf_rdma_request *rdma_req, *tmp;
int rc;
int count = 0;
rdma_qpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
/* First, try to assign free data buffers to requests that need one */
if (qpair->ctrlr) {
rdma_ctrlr = SPDK_CONTAINEROF(qpair->ctrlr, struct spdk_nvmf_rdma_ctrlr, ctrlr);
rgroup = SPDK_CONTAINEROF(qpair->ctrlr->group, struct spdk_nvmf_rdma_poll_group, group);
TAILQ_FOREACH_SAFE(rdma_req, &rdma_qpair->pending_data_buf_queue, link, tmp) {
assert(rdma_req->req.data == NULL);
rdma_req->req.data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool);
rdma_req->req.data = SLIST_FIRST(&rgroup->data_buf_pool);
if (!rdma_req->req.data) {
break;
}
SLIST_REMOVE_HEAD(&rdma_ctrlr->data_buf_pool, link);
SLIST_REMOVE_HEAD(&rgroup->data_buf_pool, link);
rdma_req->data.sgl[0].addr = (uintptr_t)rdma_req->req.data;
TAILQ_REMOVE(&rdma_qpair->pending_data_buf_queue, rdma_req, link);
if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
@@ -1192,99 +1192,98 @@ spdk_nvmf_rdma_discover(struct spdk_nvmf_transport *transport,
entry->tsas.rdma.rdma_cms = SPDK_NVMF_RDMA_CMS_RDMA_CM;
}
static struct spdk_nvmf_ctrlr *
spdk_nvmf_rdma_ctrlr_init(struct spdk_nvmf_transport *transport)
static struct spdk_nvmf_poll_group *
spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
{
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_poll_group *rgroup;
int i;
struct spdk_nvmf_rdma_buf *buf;
rdma_ctrlr = calloc(1, sizeof(*rdma_ctrlr));
if (!rdma_ctrlr) {
rgroup = calloc(1, sizeof(*rgroup));
if (!rgroup) {
return NULL;
}
/* TODO: Make the number of elements in this pool configurable. For now, one full queue
* worth seems reasonable.
*/
rdma_ctrlr->buf = spdk_dma_zmalloc(g_rdma.max_queue_depth * g_rdma.max_io_size,
0x20000, NULL);
if (!rdma_ctrlr->buf) {
rgroup->buf = spdk_dma_zmalloc(g_rdma.max_queue_depth * g_rdma.max_io_size,
0x20000, NULL);
if (!rgroup->buf) {
SPDK_ERRLOG("Large buffer pool allocation failed (%d x %d)\n",
g_rdma.max_queue_depth, g_rdma.max_io_size);
free(rdma_ctrlr);
free(rgroup);
return NULL;
}
SLIST_INIT(&rdma_ctrlr->data_buf_pool);
SLIST_INIT(&rgroup->data_buf_pool);
for (i = 0; i < g_rdma.max_queue_depth; i++) {
buf = (struct spdk_nvmf_rdma_buf *)(rdma_ctrlr->buf + (i * g_rdma.max_io_size));
SLIST_INSERT_HEAD(&rdma_ctrlr->data_buf_pool, buf, link);
buf = (struct spdk_nvmf_rdma_buf *)(rgroup->buf + (i * g_rdma.max_io_size));
SLIST_INSERT_HEAD(&rgroup->data_buf_pool, buf, link);
}
rdma_ctrlr->ctrlr.transport = transport;
return &rdma_ctrlr->ctrlr;
return &rgroup->group;
}
static void
spdk_nvmf_rdma_ctrlr_fini(struct spdk_nvmf_ctrlr *ctrlr)
spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_poll_group *rgroup;
rdma_ctrlr = SPDK_CONTAINEROF(ctrlr, struct spdk_nvmf_rdma_ctrlr, ctrlr);
rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
if (!rdma_ctrlr) {
if (!rgroup) {
return;
}
ibv_dereg_mr(rdma_ctrlr->buf_mr);
spdk_dma_free(rdma_ctrlr->buf);
free(rdma_ctrlr);
ibv_dereg_mr(rgroup->buf_mr);
spdk_dma_free(rgroup->buf);
free(rgroup);
}
static int
spdk_nvmf_rdma_ctrlr_add_qpair(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_qpair *qpair)
spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair)
{
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_qpair *rdma_qpair;
struct spdk_nvmf_rdma_poll_group *rgroup;
struct spdk_nvmf_rdma_qpair *rdma_qpair;
rdma_ctrlr = SPDK_CONTAINEROF(ctrlr, struct spdk_nvmf_rdma_ctrlr, ctrlr);
rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
rdma_qpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
if (rdma_ctrlr->verbs != NULL) {
if (rdma_ctrlr->verbs != rdma_qpair->cm_id->verbs) {
SPDK_ERRLOG("Two connections belonging to the same ctrlr cannot connect using different RDMA devices.\n");
if (rgroup->verbs != NULL) {
if (rgroup->verbs != rdma_qpair->cm_id->verbs) {
SPDK_ERRLOG("Attempted to add a qpair to a poll group with mismatched RDMA devices.\n");
return -1;
}
/* TODO: This actually needs to add the qpairs to an internal list! */
/* Nothing else to do. */
return 0;
}
rdma_ctrlr->verbs = rdma_qpair->cm_id->verbs;
rdma_ctrlr->buf_mr = ibv_reg_mr(rdma_qpair->cm_id->pd, rdma_ctrlr->buf,
g_rdma.max_queue_depth * g_rdma.max_io_size,
IBV_ACCESS_LOCAL_WRITE |
IBV_ACCESS_REMOTE_WRITE);
if (!rdma_ctrlr->buf_mr) {
rgroup->verbs = rdma_qpair->cm_id->verbs;
rgroup->buf_mr = ibv_reg_mr(rdma_qpair->cm_id->pd, rgroup->buf,
g_rdma.max_queue_depth * g_rdma.max_io_size,
IBV_ACCESS_LOCAL_WRITE |
IBV_ACCESS_REMOTE_WRITE);
if (!rgroup->buf_mr) {
SPDK_ERRLOG("Large buffer pool registration failed (%d x %d)\n",
g_rdma.max_queue_depth, g_rdma.max_io_size);
spdk_dma_free(rdma_ctrlr->buf);
free(rdma_ctrlr);
spdk_dma_free(rgroup->buf);
free(rgroup);
return -1;
}
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Controller session Shared Data Pool: %p Length: %x LKey: %x\n",
rdma_ctrlr->buf, g_rdma.max_queue_depth * g_rdma.max_io_size, rdma_ctrlr->buf_mr->lkey);
rgroup->buf, g_rdma.max_queue_depth * g_rdma.max_io_size, rgroup->buf_mr->lkey);
return 0;
}
static int
spdk_nvmf_rdma_ctrlr_remove_qpair(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_qpair *qpair)
spdk_nvmf_rdma_poll_group_remove(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair)
{
return 0;
}
@@ -1310,17 +1309,17 @@ request_release_buffer(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_rdma_request *rdma_req;
struct spdk_nvmf_qpair *qpair = req->qpair;
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_poll_group *rgroup;
struct spdk_nvmf_rdma_buf *buf;
rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
if (rdma_req->data_from_pool) {
/* Put the buffer back in the pool */
rdma_ctrlr = SPDK_CONTAINEROF(qpair->ctrlr, struct spdk_nvmf_rdma_ctrlr, ctrlr);
rgroup = SPDK_CONTAINEROF(qpair->ctrlr->group, struct spdk_nvmf_rdma_poll_group, group);
buf = req->data;
SLIST_INSERT_HEAD(&rdma_ctrlr->data_buf_pool, buf, link);
SLIST_INSERT_HEAD(&rgroup->data_buf_pool, buf, link);
req->data = NULL;
req->length = 0;
rdma_req->data_from_pool = false;
@@ -1597,10 +1596,10 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
.listen_addr_discover = spdk_nvmf_rdma_discover,
.ctrlr_init = spdk_nvmf_rdma_ctrlr_init,
.ctrlr_fini = spdk_nvmf_rdma_ctrlr_fini,
.ctrlr_add_qpair = spdk_nvmf_rdma_ctrlr_add_qpair,
.ctrlr_remove_qpair = spdk_nvmf_rdma_ctrlr_remove_qpair,
.poll_group_create = spdk_nvmf_rdma_poll_group_create,
.poll_group_destroy = spdk_nvmf_rdma_poll_group_destroy,
.poll_group_add = spdk_nvmf_rdma_poll_group_add,
.poll_group_remove = spdk_nvmf_rdma_poll_group_remove,
.req_complete = spdk_nvmf_rdma_request_complete,

View File

@@ -120,44 +120,44 @@ spdk_nvmf_transport_listen_addr_discover(struct spdk_nvmf_transport *transport,
transport->ops->listen_addr_discover(transport, listen_addr, entry);
}
struct spdk_nvmf_ctrlr *
spdk_nvmf_transport_ctrlr_init(struct spdk_nvmf_transport *transport)
struct spdk_nvmf_poll_group *
spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
{
struct spdk_nvmf_ctrlr *ctrlr;
struct spdk_nvmf_poll_group *group;
ctrlr = transport->ops->ctrlr_init(transport);
ctrlr->transport = transport;
group = transport->ops->poll_group_create(transport);
group->transport = transport;
return ctrlr;
return group;
}
void
spdk_nvmf_transport_ctrlr_fini(struct spdk_nvmf_ctrlr *ctrlr)
spdk_nvmf_transport_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
ctrlr->transport->ops->ctrlr_fini(ctrlr);
group->transport->ops->poll_group_destroy(group);
}
int
spdk_nvmf_transport_ctrlr_add_qpair(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_qpair *qpair)
spdk_nvmf_transport_poll_group_add(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair)
{
if (qpair->transport) {
assert(qpair->transport == ctrlr->transport);
if (qpair->transport != ctrlr->transport) {
assert(qpair->transport == group->transport);
if (qpair->transport != group->transport) {
return -1;
}
} else {
qpair->transport = ctrlr->transport;
qpair->transport = group->transport;
}
return ctrlr->transport->ops->ctrlr_add_qpair(ctrlr, qpair);
return group->transport->ops->poll_group_add(group, qpair);
}
int
spdk_nvmf_transport_ctrlr_remove_qpair(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_qpair *qpair)
spdk_nvmf_transport_poll_group_remove(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair)
{
return ctrlr->transport->ops->ctrlr_remove_qpair(ctrlr, qpair);
return group->transport->ops->poll_group_remove(group, qpair);
}
int

View File

@@ -90,24 +90,26 @@ struct spdk_nvmf_transport_ops {
struct spdk_nvmf_discovery_log_page_entry *entry);
/**
* Create a new ctrlr
* Create a new poll group
*/
struct spdk_nvmf_ctrlr *(*ctrlr_init)(struct spdk_nvmf_transport *transport);
struct spdk_nvmf_poll_group *(*poll_group_create)(struct spdk_nvmf_transport *transport);
/**
* Destroy a ctrlr
* Destroy a poll group
*/
void (*ctrlr_fini)(struct spdk_nvmf_ctrlr *ctrlr);
void (*poll_group_destroy)(struct spdk_nvmf_poll_group *group);
/**
* Add a qpair to a ctrlr
* Add a qpair to a poll group
*/
int (*ctrlr_add_qpair)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_qpair *qpair);
int (*poll_group_add)(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair);
/**
* Remove a qpair from a ctrlr
* Remove a qpair from a poll group
*/
int (*ctrlr_remove_qpair)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_qpair *qpair);
int (*poll_group_remove)(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair);
/*
* Signal request completion, which sends a response
@@ -147,14 +149,15 @@ void spdk_nvmf_transport_listen_addr_discover(struct spdk_nvmf_transport *transp
struct spdk_nvmf_listen_addr *listen_addr,
struct spdk_nvmf_discovery_log_page_entry *entry);
struct spdk_nvmf_ctrlr *spdk_nvmf_transport_ctrlr_init(struct spdk_nvmf_transport *transport);
struct spdk_nvmf_poll_group *spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport
*transport);
void spdk_nvmf_transport_ctrlr_fini(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_transport_poll_group_destroy(struct spdk_nvmf_poll_group *group);
int spdk_nvmf_transport_ctrlr_add_qpair(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_transport_poll_group_add(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_transport_ctrlr_remove_qpair(struct spdk_nvmf_ctrlr *ctrlr,
int spdk_nvmf_transport_poll_group_remove(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_transport_req_complete(struct spdk_nvmf_request *req);

View File

@@ -59,14 +59,14 @@ spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
return NULL;
}
struct spdk_nvmf_ctrlr *
spdk_nvmf_transport_ctrlr_init(struct spdk_nvmf_transport *transport)
struct spdk_nvmf_poll_group *
spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
{
return NULL;
}
void
spdk_nvmf_transport_ctrlr_fini(struct spdk_nvmf_ctrlr *ctrlr)
spdk_nvmf_transport_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
}
@@ -76,15 +76,15 @@ spdk_nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair)
}
int
spdk_nvmf_transport_ctrlr_add_qpair(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_qpair *qpair)
spdk_nvmf_transport_poll_group_add(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair)
{
return 0;
}
int
spdk_nvmf_transport_ctrlr_remove_qpair(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_qpair *qpair)
spdk_nvmf_transport_poll_group_remove(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair)
{
return 0;
}