bdev/nvme: Remove common.h/common.c

This only existed to share code between OCSSD and regular NVM
namespaces. Now that OCSSD is gone, merge the files into bdev_nvme.

Change-Id: Idb73cc05d67144de5dd20af8db24c8f6974d10a7
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9337
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Ben Walker 2021-08-30 11:30:18 -07:00 committed by Tomasz Zawadzki
parent a8b0293094
commit d409971b79
10 changed files with 385 additions and 465 deletions
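For readers skimming the diff below: the helpers previously declared in common.h (for example nvme_ctrlr_get_by_name() and nvme_ctrlr_get_first_active_ns()) now live in bdev_nvme.h. A minimal, hypothetical sketch of how an in-tree caller would use them after this change follows; it is not part of the commit, and the lookup_first_ns() helper is illustrative only:

#include "bdev_nvme.h"

/* Illustrative only: look up a controller by name and return its first
 * active namespace, using the declarations moved into bdev_nvme.h. */
static struct nvme_ns *
lookup_first_ns(const char *ctrlr_name)
{
	struct nvme_ctrlr *nvme_ctrlr;

	nvme_ctrlr = nvme_ctrlr_get_by_name(ctrlr_name);
	if (nvme_ctrlr == NULL) {
		return NULL;
	}

	return nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
}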

View File

@@ -37,7 +37,7 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
SO_VER := 4
SO_MINOR := 0
C_SRCS = bdev_nvme.c bdev_nvme_rpc.c nvme_rpc.c common.c
C_SRCS = bdev_nvme.c bdev_nvme_rpc.c nvme_rpc.c
C_SRCS-$(CONFIG_NVME_CUSE) += bdev_nvme_cuse_rpc.c
ifeq ($(OS),Linux)

View File

@@ -41,9 +41,11 @@
#include "spdk/endian.h"
#include "spdk/bdev.h"
#include "spdk/json.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvme_ocssd.h"
#include "spdk/nvme_zns.h"
#include "spdk/opal.h"
#include "spdk/thread.h"
#include "spdk/string.h"
#include "spdk/util.h"
@@ -223,6 +225,246 @@ static struct spdk_bdev_module nvme_if = {
};
SPDK_BDEV_MODULE_REGISTER(nvme, &nvme_if)
struct nvme_ctrlrs g_nvme_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_ctrlrs);
pthread_mutex_t g_bdev_nvme_mutex = PTHREAD_MUTEX_INITIALIZER;
bool g_bdev_nvme_module_finish;
struct nvme_ns *
nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid)
{
assert(nsid > 0);
assert(nsid <= nvme_ctrlr->num_ns);
if (nsid == 0 || nsid > nvme_ctrlr->num_ns) {
return NULL;
}
return nvme_ctrlr->namespaces[nsid - 1];
}
struct nvme_ns *
nvme_ctrlr_get_first_active_ns(struct nvme_ctrlr *nvme_ctrlr)
{
uint32_t i;
for (i = 0; i < nvme_ctrlr->num_ns; i++) {
if (nvme_ctrlr->namespaces[i] != NULL) {
return nvme_ctrlr->namespaces[i];
}
}
return NULL;
}
struct nvme_ns *
nvme_ctrlr_get_next_active_ns(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *ns)
{
uint32_t i;
if (ns == NULL) {
return NULL;
}
/* ns->id is 1-based and we want to start at the next entry in this
 * array, so we start at ns->id and don't subtract 1 to convert to a
 * 0-based index. */
for (i = ns->id; i < nvme_ctrlr->num_ns; i++) {
if (nvme_ctrlr->namespaces[i] != NULL) {
return nvme_ctrlr->namespaces[i];
}
}
return NULL;
}
struct nvme_ctrlr *
nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
{
struct nvme_ctrlr *nvme_ctrlr;
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
break;
}
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return nvme_ctrlr;
}
struct nvme_ctrlr *
nvme_ctrlr_get_by_name(const char *name)
{
struct nvme_ctrlr *nvme_ctrlr;
if (name == NULL) {
return NULL;
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (strcmp(name, nvme_ctrlr->name) == 0) {
break;
}
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return nvme_ctrlr;
}
void
nvme_ctrlr_for_each(nvme_ctrlr_for_each_fn fn, void *ctx)
{
struct nvme_ctrlr *nvme_ctrlr;
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
fn(nvme_ctrlr, ctx);
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
}
void
nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w)
{
const char *trtype_str;
const char *adrfam_str;
trtype_str = spdk_nvme_transport_id_trtype_str(trid->trtype);
if (trtype_str) {
spdk_json_write_named_string(w, "trtype", trtype_str);
}
adrfam_str = spdk_nvme_transport_id_adrfam_str(trid->adrfam);
if (adrfam_str) {
spdk_json_write_named_string(w, "adrfam", adrfam_str);
}
if (trid->traddr[0] != '\0') {
spdk_json_write_named_string(w, "traddr", trid->traddr);
}
if (trid->trsvcid[0] != '\0') {
spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
}
if (trid->subnqn[0] != '\0') {
spdk_json_write_named_string(w, "subnqn", trid->subnqn);
}
}
void
nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr)
{
struct nvme_ctrlr_trid *trid, *tmp_trid;
uint32_t i;
free(nvme_ctrlr->copied_ana_desc);
spdk_free(nvme_ctrlr->ana_log_page);
if (nvme_ctrlr->opal_dev) {
spdk_opal_dev_destruct(nvme_ctrlr->opal_dev);
nvme_ctrlr->opal_dev = NULL;
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_REMOVE(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_nvme_detach(nvme_ctrlr->ctrlr);
spdk_poller_unregister(&nvme_ctrlr->adminq_timer_poller);
free(nvme_ctrlr->name);
for (i = 0; i < nvme_ctrlr->num_ns; i++) {
free(nvme_ctrlr->namespaces[i]);
}
TAILQ_FOREACH_SAFE(trid, &nvme_ctrlr->trids, link, tmp_trid) {
TAILQ_REMOVE(&nvme_ctrlr->trids, trid, link);
free(trid);
}
pthread_mutex_destroy(&nvme_ctrlr->mutex);
free(nvme_ctrlr->namespaces);
free(nvme_ctrlr);
}
static void
nvme_ctrlr_unregister_cb(void *io_device)
{
struct nvme_ctrlr *nvme_ctrlr = io_device;
nvme_ctrlr_delete(nvme_ctrlr);
pthread_mutex_lock(&g_bdev_nvme_mutex);
if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_ctrlrs)) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_io_device_unregister(&g_nvme_ctrlrs, NULL);
spdk_bdev_module_fini_done();
return;
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
}
void
nvme_ctrlr_unregister(void *ctx)
{
struct nvme_ctrlr *nvme_ctrlr = ctx;
spdk_io_device_unregister(nvme_ctrlr, nvme_ctrlr_unregister_cb);
}
void
nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr)
{
pthread_mutex_lock(&nvme_ctrlr->mutex);
assert(nvme_ctrlr->ref > 0);
nvme_ctrlr->ref--;
if (nvme_ctrlr->ref > 0 || !nvme_ctrlr->destruct ||
nvme_ctrlr->resetting) {
pthread_mutex_unlock(&nvme_ctrlr->mutex);
return;
}
pthread_mutex_unlock(&nvme_ctrlr->mutex);
nvme_ctrlr_unregister(nvme_ctrlr);
}
int
bdev_nvme_create_bdev_channel_cb(void *io_device, void *ctx_buf)
{
struct nvme_bdev_channel *nbdev_ch = ctx_buf;
struct nvme_bdev *nbdev = io_device;
struct nvme_ns *nvme_ns;
struct spdk_io_channel *ch;
nvme_ns = nbdev->nvme_ns;
ch = spdk_get_io_channel(nvme_ns->ctrlr);
if (ch == NULL) {
SPDK_ERRLOG("Failed to alloc io_channel.\n");
return -ENOMEM;
}
nbdev_ch->ctrlr_ch = spdk_io_channel_get_ctx(ch);
nbdev_ch->nvme_ns = nvme_ns;
return 0;
}
void
bdev_nvme_destroy_bdev_channel_cb(void *io_device, void *ctx_buf)
{
struct nvme_bdev_channel *nbdev_ch = ctx_buf;
struct spdk_io_channel *ch;
ch = spdk_io_channel_from_ctx(nbdev_ch->ctrlr_ch);
spdk_put_io_channel(ch);
}
static inline bool
bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch,
struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)

View File

@@ -40,7 +40,147 @@
#include "spdk/nvme.h"
#include "spdk/bdev_module.h"
#include "common.h"
TAILQ_HEAD(nvme_ctrlrs, nvme_ctrlr);
extern struct nvme_ctrlrs g_nvme_ctrlrs;
extern pthread_mutex_t g_bdev_nvme_mutex;
extern bool g_bdev_nvme_module_finish;
#define NVME_MAX_CONTROLLERS 1024
typedef void (*spdk_bdev_create_nvme_fn)(void *ctx, size_t bdev_count, int rc);
struct nvme_async_probe_ctx {
struct spdk_nvme_probe_ctx *probe_ctx;
const char *base_name;
const char **names;
uint32_t count;
uint32_t prchk_flags;
struct spdk_poller *poller;
struct spdk_nvme_transport_id trid;
struct spdk_nvme_ctrlr_opts opts;
spdk_bdev_create_nvme_fn cb_fn;
void *cb_ctx;
uint32_t populates_in_progress;
bool ctrlr_attached;
bool probe_done;
bool namespaces_populated;
};
struct nvme_ns {
uint32_t id;
struct spdk_nvme_ns *ns;
struct nvme_ctrlr *ctrlr;
struct nvme_bdev *bdev;
uint32_t ana_group_id;
enum spdk_nvme_ana_state ana_state;
};
struct nvme_bdev_io;
struct nvme_ctrlr_trid {
struct spdk_nvme_transport_id trid;
TAILQ_ENTRY(nvme_ctrlr_trid) link;
bool is_failed;
};
typedef void (*bdev_nvme_reset_cb)(void *cb_arg, int rc);
struct nvme_ctrlr {
/**
* points to pinned, physically contiguous memory region;
* contains 4KB IDENTIFY structure for controller which is
* target for CONTROLLER IDENTIFY command during initialization
*/
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_transport_id *connected_trid;
char *name;
int ref;
bool resetting;
bool failover_in_progress;
bool destruct;
bool destruct_after_reset;
/**
 * PI check flags. These flags are set only on NVMe controllers created
 * through the bdev_nvme_attach_controller RPC or an .INI config file.
 * Hot-added NVMe controllers are not included.
 */
uint32_t prchk_flags;
uint32_t num_ns;
/** Array of pointers to namespaces indexed by nsid - 1 */
struct nvme_ns **namespaces;
struct spdk_opal_dev *opal_dev;
struct spdk_poller *adminq_timer_poller;
struct spdk_thread *thread;
bdev_nvme_reset_cb reset_cb_fn;
void *reset_cb_arg;
struct spdk_nvme_ctrlr_reset_ctx *reset_ctx;
struct spdk_poller *reset_poller;
/** linked list pointer for device list */
TAILQ_ENTRY(nvme_ctrlr) tailq;
TAILQ_HEAD(, nvme_ctrlr_trid) trids;
uint32_t ana_log_page_size;
struct spdk_nvme_ana_page *ana_log_page;
struct spdk_nvme_ana_group_descriptor *copied_ana_desc;
struct nvme_async_probe_ctx *probe_ctx;
pthread_mutex_t mutex;
};
struct nvme_bdev {
struct spdk_bdev disk;
struct nvme_ns *nvme_ns;
bool opal;
};
struct nvme_poll_group {
struct spdk_nvme_poll_group *group;
struct spdk_io_channel *accel_channel;
struct spdk_poller *poller;
bool collect_spin_stat;
uint64_t spin_ticks;
uint64_t start_ticks;
uint64_t end_ticks;
};
struct nvme_ctrlr_channel {
struct nvme_ctrlr *ctrlr;
struct spdk_nvme_qpair *qpair;
struct nvme_poll_group *group;
TAILQ_HEAD(, spdk_bdev_io) pending_resets;
};
struct nvme_bdev_channel {
struct nvme_ns *nvme_ns;
struct nvme_ctrlr_channel *ctrlr_ch;
};
struct nvme_ctrlr *nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid);
struct nvme_ctrlr *nvme_ctrlr_get_by_name(const char *name);
typedef void (*nvme_ctrlr_for_each_fn)(struct nvme_ctrlr *nvme_ctrlr, void *ctx);
void nvme_ctrlr_for_each(nvme_ctrlr_for_each_fn fn, void *ctx);
void nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid,
struct spdk_json_write_ctx *w);
void nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr);
void nvme_ctrlr_unregister(void *ctx);
void nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr);
int bdev_nvme_create_bdev_channel_cb(void *io_device, void *ctx_buf);
void bdev_nvme_destroy_bdev_channel_cb(void *io_device, void *ctx_buf);
struct nvme_ns *nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid);
struct nvme_ns *nvme_ctrlr_get_first_active_ns(struct nvme_ctrlr *nvme_ctrlr);
struct nvme_ns *nvme_ctrlr_get_next_active_ns(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *ns);
enum spdk_bdev_timeout_action {
SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE = 0,

View File

@@ -34,7 +34,6 @@
#include "spdk/stdinc.h"
#include "bdev_nvme.h"
#include "common.h"
#include "spdk/config.h"

View File

@@ -1,275 +0,0 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/env.h"
#include "common.h"
struct nvme_ctrlrs g_nvme_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_ctrlrs);
pthread_mutex_t g_bdev_nvme_mutex = PTHREAD_MUTEX_INITIALIZER;
bool g_bdev_nvme_module_finish;
struct nvme_ns *
nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid)
{
assert(nsid > 0);
assert(nsid <= nvme_ctrlr->num_ns);
if (nsid == 0 || nsid > nvme_ctrlr->num_ns) {
return NULL;
}
return nvme_ctrlr->namespaces[nsid - 1];
}
struct nvme_ns *
nvme_ctrlr_get_first_active_ns(struct nvme_ctrlr *nvme_ctrlr)
{
uint32_t i;
for (i = 0; i < nvme_ctrlr->num_ns; i++) {
if (nvme_ctrlr->namespaces[i] != NULL) {
return nvme_ctrlr->namespaces[i];
}
}
return NULL;
}
struct nvme_ns *
nvme_ctrlr_get_next_active_ns(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *ns)
{
uint32_t i;
if (ns == NULL) {
return NULL;
}
/* ns->id is 1-based and we want to start at the next entry in this
 * array, so we start at ns->id and don't subtract 1 to convert to a
 * 0-based index. */
for (i = ns->id; i < nvme_ctrlr->num_ns; i++) {
if (nvme_ctrlr->namespaces[i] != NULL) {
return nvme_ctrlr->namespaces[i];
}
}
return NULL;
}
struct nvme_ctrlr *
nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
{
struct nvme_ctrlr *nvme_ctrlr;
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
break;
}
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return nvme_ctrlr;
}
struct nvme_ctrlr *
nvme_ctrlr_get_by_name(const char *name)
{
struct nvme_ctrlr *nvme_ctrlr;
if (name == NULL) {
return NULL;
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (strcmp(name, nvme_ctrlr->name) == 0) {
break;
}
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return nvme_ctrlr;
}
void
nvme_ctrlr_for_each(nvme_ctrlr_for_each_fn fn, void *ctx)
{
struct nvme_ctrlr *nvme_ctrlr;
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
fn(nvme_ctrlr, ctx);
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
}
void
nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w)
{
const char *trtype_str;
const char *adrfam_str;
trtype_str = spdk_nvme_transport_id_trtype_str(trid->trtype);
if (trtype_str) {
spdk_json_write_named_string(w, "trtype", trtype_str);
}
adrfam_str = spdk_nvme_transport_id_adrfam_str(trid->adrfam);
if (adrfam_str) {
spdk_json_write_named_string(w, "adrfam", adrfam_str);
}
if (trid->traddr[0] != '\0') {
spdk_json_write_named_string(w, "traddr", trid->traddr);
}
if (trid->trsvcid[0] != '\0') {
spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
}
if (trid->subnqn[0] != '\0') {
spdk_json_write_named_string(w, "subnqn", trid->subnqn);
}
}
void
nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr)
{
struct nvme_ctrlr_trid *trid, *tmp_trid;
uint32_t i;
free(nvme_ctrlr->copied_ana_desc);
spdk_free(nvme_ctrlr->ana_log_page);
if (nvme_ctrlr->opal_dev) {
spdk_opal_dev_destruct(nvme_ctrlr->opal_dev);
nvme_ctrlr->opal_dev = NULL;
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_REMOVE(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_nvme_detach(nvme_ctrlr->ctrlr);
spdk_poller_unregister(&nvme_ctrlr->adminq_timer_poller);
free(nvme_ctrlr->name);
for (i = 0; i < nvme_ctrlr->num_ns; i++) {
free(nvme_ctrlr->namespaces[i]);
}
TAILQ_FOREACH_SAFE(trid, &nvme_ctrlr->trids, link, tmp_trid) {
TAILQ_REMOVE(&nvme_ctrlr->trids, trid, link);
free(trid);
}
pthread_mutex_destroy(&nvme_ctrlr->mutex);
free(nvme_ctrlr->namespaces);
free(nvme_ctrlr);
}
static void
nvme_ctrlr_unregister_cb(void *io_device)
{
struct nvme_ctrlr *nvme_ctrlr = io_device;
nvme_ctrlr_delete(nvme_ctrlr);
pthread_mutex_lock(&g_bdev_nvme_mutex);
if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_ctrlrs)) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_io_device_unregister(&g_nvme_ctrlrs, NULL);
spdk_bdev_module_fini_done();
return;
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
}
void
nvme_ctrlr_unregister(void *ctx)
{
struct nvme_ctrlr *nvme_ctrlr = ctx;
spdk_io_device_unregister(nvme_ctrlr, nvme_ctrlr_unregister_cb);
}
void
nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr)
{
pthread_mutex_lock(&nvme_ctrlr->mutex);
assert(nvme_ctrlr->ref > 0);
nvme_ctrlr->ref--;
if (nvme_ctrlr->ref > 0 || !nvme_ctrlr->destruct ||
nvme_ctrlr->resetting) {
pthread_mutex_unlock(&nvme_ctrlr->mutex);
return;
}
pthread_mutex_unlock(&nvme_ctrlr->mutex);
nvme_ctrlr_unregister(nvme_ctrlr);
}
int
bdev_nvme_create_bdev_channel_cb(void *io_device, void *ctx_buf)
{
struct nvme_bdev_channel *nbdev_ch = ctx_buf;
struct nvme_bdev *nbdev = io_device;
struct nvme_ns *nvme_ns;
struct spdk_io_channel *ch;
nvme_ns = nbdev->nvme_ns;
ch = spdk_get_io_channel(nvme_ns->ctrlr);
if (ch == NULL) {
SPDK_ERRLOG("Failed to alloc io_channel.\n");
return -ENOMEM;
}
nbdev_ch->ctrlr_ch = spdk_io_channel_get_ctx(ch);
nbdev_ch->nvme_ns = nvme_ns;
return 0;
}
void
bdev_nvme_destroy_bdev_channel_cb(void *io_device, void *ctx_buf)
{
struct nvme_bdev_channel *nbdev_ch = ctx_buf;
struct spdk_io_channel *ch;
ch = spdk_io_channel_from_ctx(nbdev_ch->ctrlr_ch);
spdk_put_io_channel(ch);
}

View File

@@ -1,184 +0,0 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SPDK_COMMON_BDEV_NVME_H
#define SPDK_COMMON_BDEV_NVME_H
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/bdev_module.h"
#include "spdk/opal.h"
TAILQ_HEAD(nvme_ctrlrs, nvme_ctrlr);
extern struct nvme_ctrlrs g_nvme_ctrlrs;
extern pthread_mutex_t g_bdev_nvme_mutex;
extern bool g_bdev_nvme_module_finish;
#define NVME_MAX_CONTROLLERS 1024
typedef void (*spdk_bdev_create_nvme_fn)(void *ctx, size_t bdev_count, int rc);
struct nvme_async_probe_ctx {
struct spdk_nvme_probe_ctx *probe_ctx;
const char *base_name;
const char **names;
uint32_t count;
uint32_t prchk_flags;
struct spdk_poller *poller;
struct spdk_nvme_transport_id trid;
struct spdk_nvme_ctrlr_opts opts;
spdk_bdev_create_nvme_fn cb_fn;
void *cb_ctx;
uint32_t populates_in_progress;
bool ctrlr_attached;
bool probe_done;
bool namespaces_populated;
};
struct nvme_ns {
uint32_t id;
struct spdk_nvme_ns *ns;
struct nvme_ctrlr *ctrlr;
struct nvme_bdev *bdev;
uint32_t ana_group_id;
enum spdk_nvme_ana_state ana_state;
};
struct nvme_bdev_io;
struct nvme_ctrlr_trid {
struct spdk_nvme_transport_id trid;
TAILQ_ENTRY(nvme_ctrlr_trid) link;
bool is_failed;
};
typedef void (*bdev_nvme_reset_cb)(void *cb_arg, int rc);
struct nvme_ctrlr {
/**
* points to pinned, physically contiguous memory region;
* contains 4KB IDENTIFY structure for controller which is
* target for CONTROLLER IDENTIFY command during initialization
*/
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_transport_id *connected_trid;
char *name;
int ref;
bool resetting;
bool failover_in_progress;
bool destruct;
bool destruct_after_reset;
/**
 * PI check flags. These flags are set only on NVMe controllers created
 * through the bdev_nvme_attach_controller RPC or an .INI config file.
 * Hot-added NVMe controllers are not included.
 */
uint32_t prchk_flags;
uint32_t num_ns;
/** Array of pointers to namespaces indexed by nsid - 1 */
struct nvme_ns **namespaces;
struct spdk_opal_dev *opal_dev;
struct spdk_poller *adminq_timer_poller;
struct spdk_thread *thread;
bdev_nvme_reset_cb reset_cb_fn;
void *reset_cb_arg;
struct spdk_nvme_ctrlr_reset_ctx *reset_ctx;
struct spdk_poller *reset_poller;
/** linked list pointer for device list */
TAILQ_ENTRY(nvme_ctrlr) tailq;
TAILQ_HEAD(, nvme_ctrlr_trid) trids;
uint32_t ana_log_page_size;
struct spdk_nvme_ana_page *ana_log_page;
struct spdk_nvme_ana_group_descriptor *copied_ana_desc;
struct nvme_async_probe_ctx *probe_ctx;
pthread_mutex_t mutex;
};
struct nvme_bdev {
struct spdk_bdev disk;
struct nvme_ns *nvme_ns;
bool opal;
};
struct nvme_poll_group {
struct spdk_nvme_poll_group *group;
struct spdk_io_channel *accel_channel;
struct spdk_poller *poller;
bool collect_spin_stat;
uint64_t spin_ticks;
uint64_t start_ticks;
uint64_t end_ticks;
};
struct nvme_ctrlr_channel {
struct nvme_ctrlr *ctrlr;
struct spdk_nvme_qpair *qpair;
struct nvme_poll_group *group;
TAILQ_HEAD(, spdk_bdev_io) pending_resets;
};
struct nvme_bdev_channel {
struct nvme_ns *nvme_ns;
struct nvme_ctrlr_channel *ctrlr_ch;
};
struct nvme_ctrlr *nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid);
struct nvme_ctrlr *nvme_ctrlr_get_by_name(const char *name);
typedef void (*nvme_ctrlr_for_each_fn)(struct nvme_ctrlr *nvme_ctrlr, void *ctx);
void nvme_ctrlr_for_each(nvme_ctrlr_for_each_fn fn, void *ctx);
void nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid,
struct spdk_json_write_ctx *w);
void nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr);
void nvme_ctrlr_unregister(void *ctx);
void nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr);
int bdev_nvme_create_bdev_channel_cb(void *io_device, void *ctx_buf);
void bdev_nvme_destroy_bdev_channel_cb(void *io_device, void *ctx_buf);
struct nvme_ns *nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid);
struct nvme_ns *nvme_ctrlr_get_first_active_ns(struct nvme_ctrlr *nvme_ctrlr);
struct nvme_ns *nvme_ctrlr_get_next_active_ns(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *ns);
#endif /* SPDK_COMMON_BDEV_NVME_H */

View File

@@ -39,7 +39,6 @@
#include "spdk/log.h"
#include "bdev_nvme.h"
#include "common.h"
#include "spdk/base64.h"
enum spdk_nvme_rpc_type {

View File

@@ -35,7 +35,6 @@
#include "spdk/bdev_module.h"
#include "bdev_nvme.h"
#include "common.h"
int vbdev_opal_create(const char *nvme_ctrlr_name, uint32_t nsid, uint8_t locking_range_id,
uint64_t range_start, uint64_t range_length, const char *password);

View File

@@ -35,6 +35,7 @@
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/log.h"
#include "spdk/opal.h"
#include "vbdev_opal.h"

View File

@@ -41,7 +41,6 @@
#include "common/lib/ut_multithread.c"
#include "bdev/nvme/bdev_nvme.c"
#include "bdev/nvme/common.c"
#include "unit/lib/json_mock.c"