lib/nvme: add a new API function for connecting I/O qpairs
connect_io_qpair essentially allows us to split the qpair allocation process in half, which will make it possible to do more sophisticated things with RDMA qpairs in poll groups. As a companion to this new API, a create_only option has been added to the io_qpair_opts struct; it instructs alloc_io_qpair to only allocate the qpair and not connect it.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: I9ba9502dd39436006a9ac71436dd1871d648ed1c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1123
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
commit 7b4558e356
parent 649367850a
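To illustrate the flow the commit message describes, here is a minimal usage sketch (not part of this commit): the qpair is allocated with the new create_only flag and connected in a separate step. It assumes an already-attached controller; the helper name is illustrative and error handling is trimmed.

#include "spdk/nvme.h"

/* Allocate a qpair without connecting it, then connect it explicitly later. */
static struct spdk_nvme_qpair *
alloc_then_connect(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.create_only = true;	/* only allocate; do not connect yet */

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	if (qpair == NULL) {
		return NULL;
	}

	/* ...the disconnected qpair could be added to a poll group here... */

	if (spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair) != 0) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		return NULL;
	}

	return qpair;
}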
@@ -1147,6 +1147,13 @@ struct spdk_nvme_io_qpair_opts {
		uint64_t paddr;
		uint64_t buffer_size;
	} cq;

	/**
	 * This flag indicates to the alloc_io_qpair function that it should not perform
	 * the connect portion on this qpair. This allows the user to add the qpair to a
	 * poll group and then connect it later.
	 */
	bool create_only;
};

/**
@@ -1164,6 +1171,10 @@ void spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
/**
 * Allocate an I/O queue pair (submission and completion queue).
 *
 * This function by default also performs any connection activities required for
 * a newly created qpair. To avoid that behavior, the user should set the create_only
 * flag in the opts structure to true.
 *
 * Each queue pair should only be used from a single thread at a time (mutual
 * exclusion must be enforced by the user).
 *
@@ -1179,6 +1190,27 @@ struct spdk_nvme_qpair *spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *c
		const struct spdk_nvme_io_qpair_opts *opts,
		size_t opts_size);

/**
 * Connect a newly created I/O qpair.
 *
 * This function does any connection activities required for a newly created qpair.
 * It should be called after spdk_nvme_ctrlr_alloc_io_qpair has been called with the
 * create_only flag set to true in the spdk_nvme_io_qpair_opts structure.
 *
 * This call will fail if performed on a qpair that is already connected.
 * For reconnecting qpairs, see spdk_nvme_ctrlr_reconnect_io_qpair.
 *
 * For fabrics like TCP and RDMA, this function actually sends the commands over the wire
 * that connect the qpair. For PCIe, this function performs some internal state machine operations.
 *
 * \param ctrlr NVMe controller for which to allocate the I/O queue pair.
 * \param qpair Opaque handle to the qpair to connect.
 *
 * return 0 on success or negated errno on failure. Specifically -EISCONN if the qpair is already connected.
 *
 */
int spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);

/**
 * Attempt to reconnect the given qpair.
 *
@@ -1188,6 +1220,13 @@ struct spdk_nvme_qpair *spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *c
 * This function must be called from the same thread as spdk_nvme_qpair_process_completions
 * and the spdk_nvme_ns_cmd_* functions.
 *
 * Calling this function has the same effect as calling spdk_nvme_ctrlr_disconnect_io_qpair
 * followed by spdk_nvme_ctrlr_connect_io_qpair.
 *
 * This function may be called on newly created qpairs, but it does extra checks and attempts
 * to disconnect the qpair before connecting it. The recommended API for newly created qpairs
 * is spdk_nvme_ctrlr_connect_io_qpair.
 *
 * \param qpair The qpair to reconnect.
 *
 * \return 0 on success, or if the qpair was already connected.
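As a caller-side companion to the documentation above, a small hypothetical helper (not from the SPDK tree) might tolerate -EISCONN when the qpair turns out to be already connected, while qpairs that were previously connected and then lost would instead go through spdk_nvme_ctrlr_reconnect_io_qpair() as recommended:

#include <errno.h>
#include <stdio.h>

#include "spdk/nvme.h"

/* Hypothetical helper: connect a qpair allocated with create_only = true,
 * treating "already connected" (-EISCONN) as success rather than a failure. */
static int
ensure_io_qpair_connected(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	int rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);

	if (rc == -EISCONN) {
		return 0;	/* someone already connected this qpair */
	}
	if (rc != 0) {
		fprintf(stderr, "spdk_nvme_ctrlr_connect_io_qpair() failed: %d\n", rc);
	}
	return rc;
}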
@@ -283,23 +283,103 @@ spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		opts->cq.buffer_size = 0;
	}

	if (FIELD_OK(create_only)) {
		opts->create_only = false;
	}

#undef FIELD_OK
}

static struct spdk_nvme_qpair *
nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			   const struct spdk_nvme_io_qpair_opts *opts)
{
	uint32_t qid;
	struct spdk_nvme_qpair *qpair;
	union spdk_nvme_cc_register cc;

	if (!ctrlr) {
		return NULL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
	 * default round robin arbitration method.
	 */
	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Get the first available I/O queue ID.
	 */
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		SPDK_ERRLOG("No free I/O queue IDs\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair == NULL) {
		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	nvme_ctrlr_proc_add_io_qpair(qpair);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return qpair;
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	int rc;

	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISABLED) {
		return -EISCONN;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
		spdk_delay_us(100);
	}

	return rc;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
		const struct spdk_nvme_io_qpair_opts *user_opts,
		size_t opts_size)
{
	uint32_t qid;
	int rc;
	struct spdk_nvme_qpair *qpair;
	union spdk_nvme_cc_register cc;
	struct spdk_nvme_io_qpair_opts opts;

	if (!ctrlr) {
		return NULL;
	}
	struct spdk_nvme_qpair *qpair;
	struct spdk_nvme_io_qpair_opts opts;
	int rc;

	/*
	 * Get the default options, then overwrite them with the user-provided options
@@ -329,63 +409,18 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
		}
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_ERRLOG("get_cc failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	qpair = nvme_ctrlr_create_io_qpair(ctrlr, &opts);

	if (qpair == NULL || opts.create_only == true) {
		return qpair;
	}

	if (opts.qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
	 * default round robin arbitration method.
	 */
	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) {
		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	/*
	 * Get the first available I/O queue ID.
	 */
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		SPDK_ERRLOG("No free I/O queue IDs\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts);
	if (qpair == NULL) {
		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
	rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("nvme_transport_ctrlr_connect_io_qpair() failed\n");
		nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
		nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}
	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	nvme_ctrlr_proc_add_io_qpair(qpair);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
		spdk_delay_us(100);
	}

	return qpair;
}
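Finally, a rough sketch of the poll group flow that motivated this change: qpairs are allocated with create_only, added to a poll group while still disconnected, and only then connected. The poll group calls below (spdk_nvme_poll_group_create() taking a single context pointer, spdk_nvme_poll_group_add()) reflect the API around the time of this change and are an assumption; error handling is omitted.

#include "spdk/nvme.h"

/* build_poll_group() is an illustrative name, not an SPDK API. */
static struct spdk_nvme_poll_group *
build_poll_group(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair **qpairs, int num)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_poll_group *group;
	int i;

	group = spdk_nvme_poll_group_create(NULL);	/* signature assumed, see note above */

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.create_only = true;

	for (i = 0; i < num; i++) {
		qpairs[i] = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
		/* Add while still disconnected; this is what create_only enables. */
		spdk_nvme_poll_group_add(group, qpairs[i]);
		spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpairs[i]);
	}

	return group;
}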