vhost/nvme: generic cleanup

* don't iterate through g_nvme_ctrlrs when it's unnecessary
* fix up a potential deadlock on session stop error
  (which can't practically happen unless the SPDK generic
   vhost layer is malfunctioning)
* add a FIXME note to wait for pending I/Os before putting
  bdev io channels and stopping the vhost pollers.

Change-Id: I576c4771f51e432fbbab244fd1b91668436004bf
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448224
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Authored by Darek Stojaczyk on 2019-03-17 00:20:49 +01:00; committed by Jim Harris
parent 3b760a4d09
commit b4abd4c9d9

@@ -1156,32 +1156,29 @@ static int
 destroy_device_poller_cb(void *arg)
 {
 	struct spdk_vhost_nvme_dev *nvme = arg;
-	struct spdk_vhost_nvme_dev *dev, *tmp;
 	struct spdk_vhost_nvme_ns *ns_dev;
 	uint32_t i;
 	SPDK_DEBUGLOG(SPDK_LOG_VHOST_NVME, "Destroy device poller callback\n");
-	TAILQ_FOREACH_SAFE(dev, &g_nvme_ctrlrs, tailq, tmp) {
-		if (dev == nvme) {
-			for (i = 0; i < nvme->num_ns; i++) {
-				ns_dev = &nvme->ns[i];
-				if (ns_dev->bdev_io_channel) {
-					spdk_put_io_channel(ns_dev->bdev_io_channel);
-					ns_dev->bdev_io_channel = NULL;
-				}
-			}
-			/* Clear BAR space */
-			if (nvme->bar) {
-				memset((void *)nvme->bar, 0, nvme->bar_size);
-			}
-			nvme->num_sqs = 0;
-			nvme->num_cqs = 0;
-			nvme->dbbuf_dbs = NULL;
-			nvme->dbbuf_eis = NULL;
-			nvme->dataplane_started = false;
-		}
-	}
+	/* FIXME wait for pending I/Os to complete */
+	for (i = 0; i < nvme->num_ns; i++) {
+		ns_dev = &nvme->ns[i];
+		if (ns_dev->bdev_io_channel) {
+			spdk_put_io_channel(ns_dev->bdev_io_channel);
+			ns_dev->bdev_io_channel = NULL;
+		}
+	}
+	/* Clear BAR space */
+	if (nvme->bar) {
+		memset((void *)nvme->bar, 0, nvme->bar_size);
+	}
+	nvme->num_sqs = 0;
+	nvme->num_cqs = 0;
+	nvme->dbbuf_dbs = NULL;
+	nvme->dbbuf_eis = NULL;
+	nvme->dataplane_started = false;
 	spdk_poller_unregister(&nvme->destroy_ctx.poller);
 	spdk_vhost_session_event_done(nvme->vsession, 0);
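
The de-indented block above is also where the new FIXME lands: bdev I/O channels are put and the BAR is cleared without first draining in-flight I/O. Below is a minimal, self-contained sketch of the kind of draining the note asks for; every name in it is illustrative and it is not the eventual SPDK fix, just the poll-until-quiesced pattern.

#include <stdio.h>

/* Illustrative device with a counter of in-flight I/Os: submissions
 * would increment it, completions decrement it. Not an SPDK type. */
struct example_dev {
	unsigned int outstanding_ios;
};

/* Destroy-poller sketch: returns 0 while I/O is still draining and 1
 * once teardown (putting channels, clearing the BAR, unregistering the
 * poller) could safely proceed. */
static int
example_destroy_poller(void *arg)
{
	struct example_dev *dev = arg;

	if (dev->outstanding_ios > 0) {
		/* Still draining: run again on the next poller iteration. */
		return 0;
	}

	/* Quiesced: safe point for the cleanup done in the hunk above. */
	return 1;
}

int
main(void)
{
	struct example_dev dev = { .outstanding_ios = 2 };

	/* Simulate completions arriving between poller iterations. */
	while (!example_destroy_poller(&dev)) {
		dev.outstanding_ios--;
	}
	printf("drained, teardown can proceed\n");
	return 0;
}
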
@@ -1196,6 +1193,7 @@ spdk_vhost_nvme_stop_cb(struct spdk_vhost_dev *vdev,
 	struct spdk_vhost_nvme_dev *nvme = to_nvme_dev(vdev);
 	if (nvme == NULL) {
+		spdk_vhost_session_event_done(vsession, -1);
 		return -1;
 	}
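
The single added line above is the deadlock fix from the commit message: the generic vhost layer sends the stop event and then waits until the session signals that the event finished, so even the early-error return has to call spdk_vhost_session_event_done(). A stand-alone sketch of that handshake with a plain POSIX semaphore follows; the names and threading model are simplified stand-ins, not SPDK's internals.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t g_stop_done;

/* Session-side stop handler. The requester below blocks until this
 * function signals completion, mirroring spdk_vhost_session_event_done(). */
static void
stop_session_cb(int session_ok)
{
	if (!session_ok) {
		/* Error path: before the fix the real callback returned here
		 * without signaling, leaving the requester stuck in sem_wait()
		 * forever. Always acknowledging, even on failure, is what the
		 * one-line change restores. */
		sem_post(&g_stop_done);
		return;
	}

	/* ... tear the session down ... */
	sem_post(&g_stop_done);
}

static void *
worker_thread(void *arg)
{
	stop_session_cb(*(int *)arg);
	return NULL;
}

int
main(void)
{
	pthread_t thread;
	int session_ok = 0;

	sem_init(&g_stop_done, 0, 0);
	pthread_create(&thread, NULL, worker_thread, &session_ok);

	/* The "generic layer": block until the session acknowledges the stop. */
	sem_wait(&g_stop_done);
	pthread_join(thread, NULL);
	printf("session stop acknowledged\n");
	return 0;
}
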
@@ -1422,7 +1420,6 @@ int
 spdk_vhost_nvme_dev_remove(struct spdk_vhost_dev *vdev)
 {
 	struct spdk_vhost_nvme_dev *nvme = to_nvme_dev(vdev);
-	struct spdk_vhost_nvme_dev *dev, *tmp;
 	struct spdk_vhost_nvme_ns *ns;
 	int rc;
 	uint32_t i;
@@ -1431,15 +1428,11 @@ spdk_vhost_nvme_dev_remove(struct spdk_vhost_dev *vdev)
 		return -EINVAL;
 	}
-	TAILQ_FOREACH_SAFE(dev, &g_nvme_ctrlrs, tailq, tmp) {
-		if (dev == nvme) {
-			TAILQ_REMOVE(&g_nvme_ctrlrs, dev, tailq);
-			for (i = 0; i < nvme->num_ns; i++) {
-				ns = &nvme->ns[i];
-				if (ns->active_ns) {
-					spdk_vhost_nvme_deactive_ns(ns);
-				}
-			}
-		}
-	}
+	TAILQ_REMOVE(&g_nvme_ctrlrs, nvme, tailq);
+	for (i = 0; i < nvme->num_ns; i++) {
+		ns = &nvme->ns[i];
+		if (ns->active_ns) {
+			spdk_vhost_nvme_deactive_ns(ns);
+		}
+	}
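
Dropping the TAILQ_FOREACH_SAFE scan works because the caller already holds a pointer to the element that sits on g_nvme_ctrlrs, and TAILQ_REMOVE() unlinks a given element in O(1) through its embedded entry rather than by searching the list. A small stand-alone illustration with <sys/queue.h> follows; the struct and list here are made up for the example.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy element with an embedded tailq entry, standing in for
 * struct spdk_vhost_nvme_dev on g_nvme_ctrlrs. */
struct ctrlr {
	int id;
	TAILQ_ENTRY(ctrlr) tailq;
};

static TAILQ_HEAD(, ctrlr) g_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ctrlrs);

int
main(void)
{
	struct ctrlr *c = calloc(1, sizeof(*c));

	if (c == NULL) {
		return 1;
	}
	c->id = 1;
	TAILQ_INSERT_TAIL(&g_ctrlrs, c, tailq);

	/* No lookup loop needed: the element unlinks itself directly. */
	TAILQ_REMOVE(&g_ctrlrs, c, tailq);
	free(c);

	printf("list empty: %d\n", TAILQ_EMPTY(&g_ctrlrs) ? 1 : 0);
	return 0;
}
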