nvme: break up spdk_nvme_probe_internal() into two stages

Previously, spdk_nvme_probe_internal() would probe NVMe controllers
and then bring the probed controllers up to the ready state within
the same call.  Split that work into separate probe and start stages;
this will make it easier to introduce a probe context in the next
patch.
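
For reference, a condensed sketch of the resulting two-stage flow in
spdk_nvme_probe(), based on the diff below (the PCIe discovery-trid
setup and some error handling are trimmed for brevity):

int
spdk_nvme_probe(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
                spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
                spdk_nvme_remove_cb remove_cb)
{
        int rc;

        /* Stage 1: probe the transport and queue any newly found
         * controllers on g_nvme_init_ctrlrs; nothing is started here. */
        rc = spdk_nvme_probe_internal(trid, cb_ctx, probe_cb, attach_cb,
                                      remove_cb, false);
        if (rc != 0) {
                return rc;
        }

        /* Stage 2: drive every queued controller to NVME_CTRLR_STATE_READY
         * and invoke attach_cb (skipped by secondary processes for PCIe). */
        if (spdk_process_is_primary() || trid->trtype != SPDK_NVME_TRANSPORT_PCIE) {
                rc = nvme_init_controllers(cb_ctx, attach_cb);
        }

        return rc;
}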

Change-Id: Ie0c55a6a5463fb437f84349b0b2b33a217ba63e0
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/c/426303
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: Changpeng Liu <changpeng.liu@intel.com>
Date:   2018-09-20 01:21:20 -04:00
Commit: 207353960f (parent: df517550cc)
2 changed files with 85 additions and 71 deletions


@@ -421,39 +421,29 @@ nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid, void *devhandle,
}
static int
nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, void *cb_ctx,
spdk_nvme_attach_cb attach_cb)
{
int rc = 0;
int start_rc;
struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;
rc = nvme_ctrlr_process_init(ctrlr);
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
/* Initialize all new controllers in the g_nvme_init_ctrlrs list in parallel. */
while (!TAILQ_EMPTY(&g_nvme_init_ctrlrs)) {
TAILQ_FOREACH_SAFE(ctrlr, &g_nvme_init_ctrlrs, tailq, ctrlr_tmp) {
/* Drop the driver lock while calling nvme_ctrlr_process_init()
* since it needs to acquire the driver lock internally when initializing
* controller.
*
* TODO: Rethink the locking - maybe reset should take the lock so that start() and
* the functions it calls (in particular nvme_ctrlr_set_num_qpairs())
* can assume it is held.
*/
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
start_rc = nvme_ctrlr_process_init(ctrlr);
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
if (start_rc) {
if (rc) {
/* Controller failed to initialize. */
TAILQ_REMOVE(&g_nvme_init_ctrlrs, ctrlr, tailq);
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
SPDK_ERRLOG("Failed to initialize SSD: %s\n", ctrlr->trid.traddr);
nvme_ctrlr_destruct(ctrlr);
rc = -1;
break;
return rc;
}
if (ctrlr->state != NVME_CTRLR_STATE_READY) {
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
return 0;
}
if (ctrlr->state == NVME_CTRLR_STATE_READY) {
/*
* Controller has been initialized.
* Move it to the attached_ctrlrs list.
@@ -478,9 +468,28 @@ nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
if (attach_cb) {
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
attach_cb(cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
return 0;
}
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
return 0;
}
static int
nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
{
int rc = 0;
struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
/* Initialize all new controllers in the g_nvme_init_ctrlrs list in parallel. */
while (!TAILQ_EMPTY(&g_nvme_init_ctrlrs)) {
TAILQ_FOREACH_SAFE(ctrlr, &g_nvme_init_ctrlrs, tailq, ctrlr_tmp) {
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
rc = nvme_ctrlr_poll_internal(ctrlr, cb_ctx, attach_cb);
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
if (rc) {
break;
}
}
@@ -532,11 +541,10 @@ spdk_nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid)
static int
spdk_nvme_probe_internal(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
spdk_nvme_remove_cb remove_cb, struct spdk_nvme_ctrlr **connected_ctrlr)
spdk_nvme_remove_cb remove_cb, bool direct_connect)
{
int rc;
struct spdk_nvme_ctrlr *ctrlr;
bool direct_connect = (connected_ctrlr != NULL);
if (!spdk_nvme_transport_available(trid->trtype)) {
SPDK_ERRLOG("NVMe trtype %u not available\n", trid->trtype);
@@ -580,28 +588,11 @@ spdk_nvme_probe_internal(const struct spdk_nvme_transport_id *trid, void *cb_ctx
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
}
}
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
rc = 0;
goto exit;
}
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
/*
* Keep going even if one or more nvme_attach() calls failed,
* but maintain the value of rc to signal errors when we return.
*/
rc = nvme_init_controllers(cb_ctx, attach_cb);
exit:
if (connected_ctrlr) {
*connected_ctrlr = spdk_nvme_get_ctrlr_by_trid(trid);
}
return rc;
return 0;
}
int
@@ -623,7 +614,21 @@ spdk_nvme_probe(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
trid = &trid_pcie;
}
return spdk_nvme_probe_internal(trid, cb_ctx, probe_cb, attach_cb, remove_cb, NULL);
rc = spdk_nvme_probe_internal(trid, cb_ctx, probe_cb, attach_cb, remove_cb, false);
if (rc != 0) {
return rc;
}
if (spdk_process_is_primary() || trid->trtype != SPDK_NVME_TRANSPORT_PCIE) {
/*
* Keep going even if one or more nvme_attach() calls failed,
* but maintain the value of rc to signal errors when we return.
*/
rc = nvme_init_controllers(cb_ctx, attach_cb);
}
return rc;
}
static bool
@@ -668,7 +673,16 @@ spdk_nvme_connect(const struct spdk_nvme_transport_id *trid,
probe_cb = spdk_nvme_connect_probe_cb;
}
spdk_nvme_probe_internal(trid, user_connect_opts, probe_cb, NULL, NULL, &ctrlr);
spdk_nvme_probe_internal(trid, user_connect_opts, probe_cb, NULL, NULL, true);
if (spdk_process_is_primary() || trid->trtype != SPDK_NVME_TRANSPORT_PCIE) {
rc = nvme_init_controllers(NULL, NULL);
if (rc != 0) {
return NULL;
}
}
ctrlr = spdk_nvme_get_ctrlr_by_trid(trid);
return ctrlr;
}


@@ -287,7 +287,7 @@ test_nvme_init_controllers(void)
g_spdk_nvme_driver->initialized = false;
ut_destruct_called = false;
rc = nvme_init_controllers(cb_ctx, attach_cb);
CU_ASSERT(rc == -1);
CU_ASSERT(rc != 0);
CU_ASSERT(g_spdk_nvme_driver->initialized == true);
CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
CU_ASSERT(ut_destruct_called == true);