nvmf: Remove direct mode

There is now only virtual mode. Virtual mode has been
improved enough to reach feature parity with direct
mode and performance benchmarks show no degradation.
Simplify the code by always using virtual mode.

Change-Id: Id5cdb5d4d8c54e661b245ed7250c2f9d66ca2152
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/369496
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Ben Walker 2017-06-27 11:26:19 -07:00
parent 7e9f556363
commit f6e62d2ce1
40 changed files with 233 additions and 802 deletions

View File

@ -133,7 +133,6 @@ spdk_add_nvmf_discovery_subsystem(void)
struct nvmf_tgt_subsystem *app_subsys;
app_subsys = nvmf_tgt_create_subsystem(SPDK_NVMF_DISCOVERY_NQN, SPDK_NVMF_SUBTYPE_DISCOVERY,
NVMF_SUBSYSTEM_MODE_DIRECT,
g_spdk_nvmf_tgt_conf.acceptor_lcore);
if (app_subsys == NULL) {
SPDK_ERRLOG("Failed creating discovery nvmf library subsystem\n");
@ -224,70 +223,6 @@ spdk_nvmf_parse_nvmf_tgt(void)
return 0;
}
/*
 * spdk_nvme_probe() probe callback: decide whether to attach the controller
 * identified by trid.
 *
 * cb_ctx is a struct spdk_nvmf_probe_ctx. In wildcard mode (ctx->any) the
 * first controller seen is claimed; otherwise only the controller whose
 * transport address matches ctx->trid.traddr is claimed. Marks ctx->found
 * when a controller is claimed.
 */
static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	struct spdk_nvmf_probe_ctx *probe_ctx = cb_ctx;
	bool wildcard_hit = probe_ctx->any && !probe_ctx->found;
	bool addr_hit = strcmp(trid->traddr, probe_ctx->trid.traddr) == 0;

	if (!wildcard_hit && !addr_hit) {
		return false;
	}

	probe_ctx->found = true;
	return true;
}
/*
 * spdk_nvme_probe() attach callback: a controller accepted by probe_cb has
 * finished initializing; add it to the target subsystem carried in cb_ctx.
 *
 * cb_ctx is a struct spdk_nvmf_probe_ctx; trid is the attached controller's
 * transport address (a PCI BDF string here); opts is unused.
 */
static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
struct spdk_nvmf_probe_ctx *ctx = cb_ctx;
int rc;
/* -1 means "socket unknown"; the NUMA warning below is skipped in that case. */
int numa_node = -1;
struct spdk_pci_addr pci_addr;
struct spdk_pci_device *pci_dev;
spdk_pci_addr_parse(&pci_addr, trid->traddr);
/* If a specific BDF was requested, ignore any other device that attaches. */
if (ctx->trid.traddr[0] != '\0' && strcmp(trid->traddr, ctx->trid.traddr)) {
SPDK_WARNLOG("Attached device is not expected\n");
return;
}
SPDK_NOTICELOG("Attaching NVMe device %p at %s to subsystem %s\n",
ctrlr,
trid->traddr,
spdk_nvmf_subsystem_get_nqn(ctx->app_subsystem->subsystem));
/* Look up the device's NUMA socket so cross-node placement can be flagged. */
pci_dev = spdk_pci_get_device(&pci_addr);
if (pci_dev) {
numa_node = spdk_pci_device_get_socket_id(pci_dev);
}
if (numa_node >= 0) {
/* Warn if the subsystem's core and the NVMe device sit on different NUMA nodes. */
if (spdk_env_get_socket_id(ctx->app_subsystem->lcore) != (unsigned)numa_node) {
SPDK_WARNLOG("Subsystem %s is configured to run on a CPU core %u belonging "
"to a different NUMA node than the associated NVMe device. "
"This may result in reduced performance.\n",
spdk_nvmf_subsystem_get_nqn(ctx->app_subsystem->subsystem),
ctx->app_subsystem->lcore);
SPDK_WARNLOG("The NVMe device is on socket %u\n", numa_node);
SPDK_WARNLOG("The Subsystem is on socket %u\n",
spdk_env_get_socket_id(ctx->app_subsystem->lcore));
}
}
/* Hand the controller to the subsystem. On failure only an error is logged;
 * ctx->found is still set below so the caller does not report "not found".
 * NOTE(review): presumably the probe framework retains the controller on
 * failure — confirm ownership semantics of nvmf_subsystem_add_ctrlr. */
rc = nvmf_subsystem_add_ctrlr(ctx->app_subsystem->subsystem, ctrlr, &pci_addr);
if (rc < 0) {
SPDK_ERRLOG("Failed to add controller to subsystem\n");
}
ctx->found = true;
}
static int
spdk_nvmf_allocate_lcore(uint64_t mask, uint32_t lcore)
{
@ -312,7 +247,7 @@ spdk_nvmf_allocate_lcore(uint64_t mask, uint32_t lcore)
static int
spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
{
const char *nqn, *mode_str;
const char *nqn, *mode;
int i, ret;
int lcore;
int num_listen_addrs;
@ -320,15 +255,29 @@ spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
char *listen_addrs_str[MAX_LISTEN_ADDRESSES] = {};
int num_hosts;
char *hosts[MAX_HOSTS];
const char *bdf;
const char *sn;
int num_devs;
char *devs[MAX_VIRTUAL_NAMESPACE];
nqn = spdk_conf_section_get_val(sp, "NQN");
mode_str = spdk_conf_section_get_val(sp, "Mode");
mode = spdk_conf_section_get_val(sp, "Mode");
lcore = spdk_conf_section_get_intval(sp, "Core");
/* Mode is no longer a valid parameter, but print out a nice
* message if it exists to inform users.
*/
if (mode) {
SPDK_NOTICELOG("Mode present in the [Subsystem] section of the config file.\n"
"Mode was removed as a valid parameter.\n");
if (strcasecmp(mode, "Virtual")) {
SPDK_NOTICELOG("Your mode value is 'Virtual' which is now the only possible mode.\n"
"Your configuration file will work as expected.\n");
} else {
SPDK_NOTICELOG("Please remove Mode from your configuration file.\n");
return -1;
}
}
/* Parse Listen sections */
num_listen_addrs = 0;
for (i = 0; i < MAX_LISTEN_ADDRESSES; i++) {
@ -366,7 +315,6 @@ spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
}
num_hosts = i;
bdf = spdk_conf_section_get_val(sp, "NVMe");
sn = spdk_conf_section_get_val(sp, "SN");
num_devs = 0;
@ -379,10 +327,10 @@ spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
num_devs++;
}
ret = spdk_nvmf_construct_subsystem(nqn, mode_str, lcore,
ret = spdk_nvmf_construct_subsystem(nqn, lcore,
num_listen_addrs, listen_addrs,
num_hosts, hosts,
bdf, sn,
sn,
num_devs, devs);
for (i = 0; i < MAX_LISTEN_ADDRESSES; i++) {
@ -432,18 +380,18 @@ spdk_nvmf_parse_conf(void)
}
int
spdk_nvmf_construct_subsystem(const char *name,
const char *mode_str, int32_t lcore,
spdk_nvmf_construct_subsystem(const char *name, int32_t lcore,
int num_listen_addresses, struct rpc_listen_address *addresses,
int num_hosts, char *hosts[], const char *bdf,
int num_hosts, char *hosts[],
const char *sn, int num_devs, char *dev_list[])
{
struct spdk_nvmf_subsystem *subsystem;
struct nvmf_tgt_subsystem *app_subsys;
struct spdk_nvmf_listen_addr *listen_addr;
enum spdk_nvmf_subsystem_mode mode;
int i;
uint64_t mask;
struct spdk_bdev *bdev;
const char *namespace;
if (name == NULL) {
SPDK_ERRLOG("No NQN specified for subsystem\n");
@ -469,23 +417,7 @@ spdk_nvmf_construct_subsystem(const char *name,
lcore = spdk_nvmf_allocate_lcore(mask, lcore);
g_last_core = lcore;
/* Determine the mode the subsysem will operate in */
if (mode_str == NULL) {
SPDK_ERRLOG("No Mode specified for Subsystem %s\n", name);
return -1;
}
if (strcasecmp(mode_str, "Direct") == 0) {
mode = NVMF_SUBSYSTEM_MODE_DIRECT;
} else if (strcasecmp(mode_str, "Virtual") == 0) {
mode = NVMF_SUBSYSTEM_MODE_VIRTUAL;
} else {
SPDK_ERRLOG("Invalid Subsystem mode: %s\n", mode_str);
return -1;
}
app_subsys = nvmf_tgt_create_subsystem(name, SPDK_NVMF_SUBTYPE_NVME,
mode, lcore);
app_subsys = nvmf_tgt_create_subsystem(name, SPDK_NVMF_SUBTYPE_NVME, lcore);
if (app_subsys == NULL) {
SPDK_ERRLOG("Subsystem creation failed\n");
return -1;
@ -544,49 +476,6 @@ spdk_nvmf_construct_subsystem(const char *name,
spdk_nvmf_subsystem_add_host(subsystem, hosts[i]);
}
if (mode == NVMF_SUBSYSTEM_MODE_DIRECT) {
struct spdk_nvmf_probe_ctx ctx = { 0 };
struct spdk_nvme_transport_id trid = {};
struct spdk_pci_addr pci_addr = {};
if (bdf == NULL) {
SPDK_ERRLOG("Subsystem %s: missing NVMe directive\n", name);
goto error;
}
if (num_devs != 0) {
SPDK_ERRLOG("Subsystem %s: Namespaces not allowed for Direct mode\n", name);
goto error;
}
trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
ctx.app_subsystem = app_subsys;
ctx.found = false;
if (strcmp(bdf, "*") == 0) {
ctx.any = true;
} else {
if (spdk_pci_addr_parse(&pci_addr, bdf) < 0) {
SPDK_ERRLOG("Invalid format for NVMe BDF: %s\n", bdf);
goto error;
}
ctx.any = false;
spdk_pci_addr_fmt(trid.traddr, sizeof(trid.traddr), &pci_addr);
ctx.trid = trid;
}
if (spdk_nvme_probe(&trid, &ctx, probe_cb, attach_cb, NULL)) {
SPDK_ERRLOG("One or more controllers failed in spdk_nvme_probe()\n");
}
if (!ctx.found) {
SPDK_ERRLOG("Could not find NVMe controller at PCI address %04x:%02x:%02x.%x\n",
pci_addr.domain, pci_addr.bus, pci_addr.dev, pci_addr.func);
goto error;
}
} else {
struct spdk_bdev *bdev;
const char *namespace;
if (sn == NULL) {
SPDK_ERRLOG("Subsystem %s: missing serial number\n", name);
goto error;
@ -616,7 +505,6 @@ spdk_nvmf_construct_subsystem(const char *name,
spdk_bdev_get_name(bdev), spdk_nvmf_subsystem_get_nqn(subsystem));
}
}
nvmf_tgt_start_subsystem(app_subsys);

View File

@ -58,14 +58,6 @@ dump_nvmf_subsystem(struct spdk_json_write_ctx *w, struct nvmf_tgt_subsystem *tg
spdk_json_write_name(w, "nqn");
spdk_json_write_string(w, spdk_nvmf_subsystem_get_nqn(subsystem));
if (spdk_nvmf_subsystem_get_type(subsystem) == SPDK_NVMF_SUBTYPE_NVME) {
spdk_json_write_name(w, "mode");
if (spdk_nvmf_subsystem_get_mode(subsystem) == NVMF_SUBSYSTEM_MODE_DIRECT) {
spdk_json_write_string(w, "direct");
} else {
spdk_json_write_string(w, "virtual");
}
}
spdk_json_write_name(w, "subtype");
if (spdk_nvmf_subsystem_get_type(subsystem) == SPDK_NVMF_SUBTYPE_NVME) {
spdk_json_write_string(w, "NVMe");
@ -112,22 +104,14 @@ dump_nvmf_subsystem(struct spdk_json_write_ctx *w, struct nvmf_tgt_subsystem *tg
spdk_json_write_array_end(w);
if (spdk_nvmf_subsystem_get_type(subsystem) == SPDK_NVMF_SUBTYPE_NVME) {
if (spdk_nvmf_subsystem_get_mode(subsystem) == NVMF_SUBSYSTEM_MODE_DIRECT) {
spdk_json_write_name(w, "pci_address");
spdk_json_write_string_fmt(w, "%04x:%02x:%02x.%x",
subsystem->dev.direct.pci_addr.domain,
subsystem->dev.direct.pci_addr.bus,
subsystem->dev.direct.pci_addr.dev,
subsystem->dev.direct.pci_addr.func);
} else {
uint32_t i;
spdk_json_write_name(w, "serial_number");
spdk_json_write_string(w, spdk_nvmf_subsystem_get_sn(subsystem));
spdk_json_write_name(w, "namespaces");
spdk_json_write_array_begin(w);
for (i = 0; i < subsystem->dev.virt.max_nsid; i++) {
if (subsystem->dev.virt.ns_list[i] == NULL) {
for (i = 0; i < subsystem->dev.max_nsid; i++) {
if (subsystem->dev.ns_list[i] == NULL) {
continue;
}
@ -135,12 +119,10 @@ dump_nvmf_subsystem(struct spdk_json_write_ctx *w, struct nvmf_tgt_subsystem *tg
spdk_json_write_name(w, "nsid");
spdk_json_write_int32(w, i + 1);
spdk_json_write_name(w, "name");
spdk_json_write_string(w, spdk_bdev_get_name(subsystem->dev.virt.ns_list[i]));
spdk_json_write_string(w, spdk_bdev_get_name(subsystem->dev.ns_list[i]));
spdk_json_write_object_end(w);
}
spdk_json_write_array_end(w);
}
}
spdk_json_write_object_end(w);
}
@ -299,11 +281,10 @@ free_rpc_subsystem(struct rpc_subsystem *req)
static const struct spdk_json_object_decoder rpc_subsystem_decoders[] = {
{"core", offsetof(struct rpc_subsystem, core), spdk_json_decode_int32, true},
{"mode", offsetof(struct rpc_subsystem, mode), spdk_json_decode_string},
{"mode", offsetof(struct rpc_subsystem, mode), spdk_json_decode_string, true},
{"nqn", offsetof(struct rpc_subsystem, nqn), spdk_json_decode_string},
{"listen_addresses", offsetof(struct rpc_subsystem, listen_addresses), decode_rpc_listen_addresses},
{"hosts", offsetof(struct rpc_subsystem, hosts), decode_rpc_hosts, true},
{"pci_address", offsetof(struct rpc_subsystem, pci_address), spdk_json_decode_string, true},
{"serial_number", offsetof(struct rpc_subsystem, serial_number), spdk_json_decode_string, true},
{"namespaces", offsetof(struct rpc_subsystem, namespaces), decode_rpc_dev_names, true},
};
@ -324,10 +305,25 @@ spdk_rpc_construct_nvmf_subsystem(struct spdk_jsonrpc_request *request,
goto invalid;
}
ret = spdk_nvmf_construct_subsystem(req.nqn, req.mode, req.core,
/* Mode is no longer a valid parameter, but print out a nice
* message if it exists to inform users.
*/
if (req.mode) {
SPDK_NOTICELOG("Mode present in the construct NVMe-oF subsystem RPC.\n"
"Mode was removed as a valid parameter.\n");
if (strcasecmp(req.mode, "Virtual")) {
SPDK_NOTICELOG("Your mode value is 'Virtual' which is now the only possible mode.\n"
"Your RPC will work as expected.\n");
} else {
SPDK_NOTICELOG("Please remove 'mode' from the RPC.\n");
goto invalid;
}
}
ret = spdk_nvmf_construct_subsystem(req.nqn, req.core,
req.listen_addresses.num_listen_address,
req.listen_addresses.addresses,
req.hosts.num_hosts, req.hosts.hosts, req.pci_address,
req.hosts.num_hosts, req.hosts.hosts,
req.serial_number,
req.namespaces.num_names, req.namespaces.names);
if (ret) {

View File

@ -184,7 +184,7 @@ nvmf_tgt_start_subsystem(struct nvmf_tgt_subsystem *app_subsys)
struct nvmf_tgt_subsystem *
nvmf_tgt_create_subsystem(const char *name, enum spdk_nvmf_subtype subtype,
enum spdk_nvmf_subsystem_mode mode, uint32_t lcore)
uint32_t lcore)
{
struct spdk_nvmf_subsystem *subsystem;
struct nvmf_tgt_subsystem *app_subsys;
@ -200,7 +200,7 @@ nvmf_tgt_create_subsystem(const char *name, enum spdk_nvmf_subtype subtype,
return NULL;
}
subsystem = spdk_nvmf_create_subsystem(name, subtype, mode, app_subsys, connect_cb,
subsystem = spdk_nvmf_create_subsystem(name, subtype, app_subsys, connect_cb,
disconnect_cb);
if (subsystem == NULL) {
SPDK_ERRLOG("Subsystem creation failed\n");

View File

@ -75,14 +75,13 @@ void nvmf_tgt_start_subsystem(struct nvmf_tgt_subsystem *subsystem);
struct nvmf_tgt_subsystem *nvmf_tgt_create_subsystem(const char *name,
enum spdk_nvmf_subtype subtype,
enum spdk_nvmf_subsystem_mode mode,
uint32_t lcore);
int
spdk_nvmf_construct_subsystem(const char *name,
const char *mode, int32_t lcore,
int32_t lcore,
int num_listen_addresses, struct rpc_listen_address *addresses,
int num_hosts, char *hosts[], const char *bdf,
int num_hosts, char *hosts[],
const char *sn, int num_devs, char *dev_list[]);
int

View File

@ -148,21 +148,24 @@ ReactorMask 0xF000000
the [Subsystem] section of the configuration file. For example,
to assign the Subsystems to lcores 25 and 26:
~~~{.sh}
[Nvme]
TransportID "trtype:PCIe traddr:0000:02:00.0" Nvme0
TransportID "trtype:PCIe traddr:0000:82:00.0" Nvme1
[Subsystem1]
NQN nqn.2016-06.io.spdk:cnode1
Core 25
Mode Direct
Listen RDMA 192.168.100.8:4420
Host nqn.2016-06.io.spdk:init
NVMe 0000:81:00.0
SN SPDK00000000000001
Namespace Nvme0n1
[Subsystem2]
NQN nqn.2016-06.io.spdk:cnode2
Core 26
Mode Direct
Listen RDMA 192.168.100.9:4420
Host nqn.2016-06.io.spdk:init
NVMe 0000:86:00.0
SN SPDK00000000000002
Namespace Nvme1n1
~~~
SPDK executes all code for an NVMe-oF subsystem on a single thread. Different subsystems may execute
on different threads. SPDK gives the user maximum control to determine how many CPU cores are used
@ -178,15 +181,13 @@ file as follows:
**Create malloc LUNs:** See @ref bdev_getting_started for details on creating Malloc block devices.
**Create a virtual controller:** Virtual mode allows any SPDK block device to be presented as an
NVMe-oF namespace. These block devices don't need to be NVMe devices. For example, to create a
virtual controller for malloc LUNs named Malloc0 and Malloc1:
**Create a virtual controller:** Any bdev may be presented as a namespace. For example, to create a
virtual controller with two namespaces backed by the malloc LUNs named Malloc0 and Malloc1:
~~~{.sh}
# Virtual controller
[Subsystem2]
NQN nqn.2016-06.io.spdk:cnode2
Core 0
Mode Virtual
Listen RDMA 192.168.2.21:4420
Host nqn.2016-06.io.spdk:init
SN SPDK00000000000001

View File

@ -119,13 +119,6 @@
# - Core may be set or not. If set, the specified subsystem will run on
# it, otherwise each subsystem will use a round-robin method to allocate
# core from available cores, lcores are numbered starting at 0.
# - Mode may be either "Direct" or "Virtual". Direct means that physical
# devices attached to the target will be presented to hosts as if they
# were directly attached to the host. No software emulation or command
# validation is performed. Virtual means that an NVMe controller is
# emulated in software and the namespaces it contains map to block devices
# on the target system. These block devices do not need to be NVMe devices.
# Only Direct mode is currently supported.
# - Between 1 and 255 Listen directives are allowed. This defines
# the addresses on which new connections may be accepted. The format
# is Listen <type> <address> where type currently can only be RDMA.
@ -136,26 +129,24 @@
# PCI domain:bus:device.function can be replaced by "*" to indicate
# any PCI device.
# Direct controller
# Namespaces backed by physical NVMe devices
[Subsystem1]
NQN nqn.2016-06.io.spdk:cnode1
Core 0
Mode Direct
Listen RDMA 15.15.15.2:4420
Host nqn.2016-06.io.spdk:init
NVMe 0000:00:00.0
# Multiple subsystems are allowed.
# Virtual controller
[Subsystem2]
NQN nqn.2016-06.io.spdk:cnode2
Core 0
Mode Virtual
Listen RDMA 192.168.2.21:4420
Host nqn.2016-06.io.spdk:init
SN SPDK00000000000001
Namespace Nvme0n1
Namespace Nvme1n1
# Multiple subsystems are allowed.
# Namespaces backed by non-NVMe devices
[Subsystem2]
NQN nqn.2016-06.io.spdk:cnode2
Core 0
Listen RDMA 192.168.2.21:4420
Host nqn.2016-06.io.spdk:init
SN SPDK00000000000002
Namespace Malloc0
Namespace Malloc1
Namespace AIO0

View File

@ -59,7 +59,6 @@ struct spdk_nvmf_session;
struct spdk_nvmf_conn;
struct spdk_nvmf_request;
struct spdk_bdev;
struct spdk_nvme_ctrlr;
struct spdk_nvmf_request;
struct spdk_nvmf_conn;
struct spdk_nvmf_ctrlr_ops;
@ -67,11 +66,6 @@ struct spdk_nvmf_ctrlr_ops;
typedef void (*spdk_nvmf_subsystem_connect_fn)(void *cb_ctx, struct spdk_nvmf_request *req);
typedef void (*spdk_nvmf_subsystem_disconnect_fn)(void *cb_ctx, struct spdk_nvmf_conn *conn);
enum spdk_nvmf_subsystem_mode {
NVMF_SUBSYSTEM_MODE_DIRECT = 0,
NVMF_SUBSYSTEM_MODE_VIRTUAL = 1,
};
struct spdk_nvmf_listen_addr {
char *traddr;
char *trsvcid;
@ -98,17 +92,8 @@ struct spdk_nvmf_subsystem_allowed_listener {
struct spdk_nvmf_subsystem {
uint32_t id;
char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
enum spdk_nvmf_subsystem_mode mode;
enum spdk_nvmf_subtype subtype;
bool is_removed;
union {
struct {
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_qpair *io_qpair;
struct spdk_pci_addr pci_addr;
struct spdk_poller *admin_poller;
int32_t outstanding_admin_cmd_count;
} direct;
struct {
char sn[MAX_SN_LEN + 1];
@ -116,7 +101,6 @@ struct spdk_nvmf_subsystem {
struct spdk_bdev_desc *desc[MAX_VIRTUAL_NAMESPACE];
struct spdk_io_channel *ch[MAX_VIRTUAL_NAMESPACE];
uint32_t max_nsid;
} virt;
} dev;
const struct spdk_nvmf_ctrlr_ops *ops;
@ -136,7 +120,6 @@ struct spdk_nvmf_subsystem {
struct spdk_nvmf_subsystem *spdk_nvmf_create_subsystem(const char *nqn,
enum spdk_nvmf_subtype type,
enum spdk_nvmf_subsystem_mode mode,
void *cb_ctx,
spdk_nvmf_subsystem_connect_fn connect_cb,
spdk_nvmf_subsystem_disconnect_fn disconnect_cb);
@ -166,9 +149,6 @@ bool spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
int spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem,
const char *host_nqn);
int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
struct spdk_nvme_ctrlr *ctrlr, const struct spdk_pci_addr *pci_addr);
void spdk_nvmf_subsystem_poll(struct spdk_nvmf_subsystem *subsystem);
/**
@ -190,7 +170,6 @@ int spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char
const char *spdk_nvmf_subsystem_get_nqn(struct spdk_nvmf_subsystem *subsystem);
enum spdk_nvmf_subtype spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem);
enum spdk_nvmf_subsystem_mode spdk_nvmf_subsystem_get_mode(struct spdk_nvmf_subsystem *subsystem);
void spdk_nvmf_acceptor_poll(void);

View File

@ -37,9 +37,9 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
CFLAGS += $(ENV_CFLAGS)
LIBNAME = nvmf
C_SRCS = discovery.c subsystem.c nvmf.c \
C_SRCS = subsystem.c nvmf.c \
request.c session.c transport.c \
direct.c virtual.c
ctrlr_discovery.c ctrlr_bdev.c
C_SRCS-$(CONFIG_RDMA) += rdma.c

View File

@ -49,7 +49,7 @@
#include "spdk_internal/log.h"
#define MODEL_NUMBER "SPDK Virtual Controller"
#define MODEL_NUMBER "SPDK bdev Controller"
#define FW_VERSION "FFFFFFFF"
/* read command dword 12 */
@ -61,12 +61,12 @@ struct __attribute__((packed)) nvme_read_cdw12 {
uint8_t lr : 1; /* limited retry */
};
static void nvmf_virtual_set_dsm(struct spdk_nvmf_session *session)
static void nvmf_bdev_set_dsm(struct spdk_nvmf_session *session)
{
uint32_t i;
for (i = 0; i < session->subsys->dev.virt.max_nsid; i++) {
struct spdk_bdev *bdev = session->subsys->dev.virt.ns_list[i];
for (i = 0; i < session->subsys->dev.max_nsid; i++) {
struct spdk_bdev *bdev = session->subsys->dev.ns_list[i];
if (bdev == NULL) {
continue;
@ -86,7 +86,7 @@ static void nvmf_virtual_set_dsm(struct spdk_nvmf_session *session)
}
static void
nvmf_virtual_ctrlr_get_data(struct spdk_nvmf_session *session)
nvmf_bdev_ctrlr_get_data(struct spdk_nvmf_session *session)
{
struct spdk_nvmf_subsystem *subsys = session->subsys;
@ -110,21 +110,21 @@ nvmf_virtual_ctrlr_get_data(struct spdk_nvmf_session *session)
session->vcdata.cqes.min = 0x04;
session->vcdata.cqes.max = 0x04;
session->vcdata.maxcmd = 1024;
session->vcdata.nn = subsys->dev.virt.max_nsid;
session->vcdata.nn = subsys->dev.max_nsid;
session->vcdata.vwc.present = 1;
session->vcdata.sgls.supported = 1;
strncpy(session->vcdata.subnqn, session->subsys->subnqn, sizeof(session->vcdata.subnqn));
nvmf_virtual_set_dsm(session);
nvmf_bdev_set_dsm(session);
}
static void
nvmf_virtual_ctrlr_poll_for_completions(struct spdk_nvmf_subsystem *subsystem)
nvmf_bdev_ctrlr_poll_for_completions(struct spdk_nvmf_subsystem *subsystem)
{
return;
}
static void
nvmf_virtual_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
void *cb_arg)
{
struct spdk_nvmf_request *req = cb_arg;
@ -145,7 +145,7 @@ nvmf_virtual_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
}
static int
nvmf_virtual_ctrlr_get_log_page(struct spdk_nvmf_request *req)
nvmf_bdev_ctrlr_get_log_page(struct spdk_nvmf_request *req)
{
uint8_t lid;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
@ -190,13 +190,13 @@ identify_ns(struct spdk_nvmf_subsystem *subsystem,
struct spdk_bdev *bdev;
uint64_t num_blocks;
if (cmd->nsid > subsystem->dev.virt.max_nsid || cmd->nsid == 0) {
if (cmd->nsid > subsystem->dev.max_nsid || cmd->nsid == 0) {
SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", cmd->nsid);
rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
bdev = subsystem->dev.virt.ns_list[cmd->nsid - 1];
bdev = subsystem->dev.ns_list[cmd->nsid - 1];
if (bdev == NULL) {
memset(nsdata, 0, sizeof(*nsdata));
@ -236,13 +236,13 @@ identify_active_ns_list(struct spdk_nvmf_subsystem *subsystem,
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
num_ns = subsystem->dev.virt.max_nsid;
num_ns = subsystem->dev.max_nsid;
for (i = 1; i <= num_ns; i++) {
if (i <= cmd->nsid) {
continue;
}
if (subsystem->dev.virt.ns_list[i - 1] == NULL) {
if (subsystem->dev.ns_list[i - 1] == NULL) {
continue;
}
ns_list->ns_list[count++] = i;
@ -255,7 +255,7 @@ identify_active_ns_list(struct spdk_nvmf_subsystem *subsystem,
}
static int
nvmf_virtual_ctrlr_identify(struct spdk_nvmf_request *req)
nvmf_bdev_ctrlr_identify(struct spdk_nvmf_request *req)
{
uint8_t cns;
struct spdk_nvmf_session *session = req->conn->sess;
@ -287,7 +287,7 @@ nvmf_virtual_ctrlr_identify(struct spdk_nvmf_request *req)
}
static int
nvmf_virtual_ctrlr_abort(struct spdk_nvmf_request *req)
nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
@ -334,7 +334,7 @@ nvmf_virtual_ctrlr_abort(struct spdk_nvmf_request *req)
}
static int
nvmf_virtual_ctrlr_get_features(struct spdk_nvmf_request *req)
nvmf_bdev_ctrlr_get_features(struct spdk_nvmf_request *req)
{
uint8_t feature;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
@ -361,7 +361,7 @@ nvmf_virtual_ctrlr_get_features(struct spdk_nvmf_request *req)
}
static int
nvmf_virtual_ctrlr_set_features(struct spdk_nvmf_request *req)
nvmf_bdev_ctrlr_set_features(struct spdk_nvmf_request *req)
{
uint8_t feature;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
@ -385,7 +385,7 @@ nvmf_virtual_ctrlr_set_features(struct spdk_nvmf_request *req)
}
static int
nvmf_virtual_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
nvmf_bdev_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
{
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
@ -395,15 +395,15 @@ nvmf_virtual_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
switch (cmd->opc) {
case SPDK_NVME_OPC_GET_LOG_PAGE:
return nvmf_virtual_ctrlr_get_log_page(req);
return nvmf_bdev_ctrlr_get_log_page(req);
case SPDK_NVME_OPC_IDENTIFY:
return nvmf_virtual_ctrlr_identify(req);
return nvmf_bdev_ctrlr_identify(req);
case SPDK_NVME_OPC_ABORT:
return nvmf_virtual_ctrlr_abort(req);
return nvmf_bdev_ctrlr_abort(req);
case SPDK_NVME_OPC_GET_FEATURES:
return nvmf_virtual_ctrlr_get_features(req);
return nvmf_bdev_ctrlr_get_features(req);
case SPDK_NVME_OPC_SET_FEATURES:
return nvmf_virtual_ctrlr_set_features(req);
return nvmf_bdev_ctrlr_set_features(req);
case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
return spdk_nvmf_session_async_event_request(req);
case SPDK_NVME_OPC_KEEP_ALIVE:
@ -434,7 +434,7 @@ nvmf_virtual_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
}
static int
nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
nvmf_bdev_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
uint64_t lba_address;
@ -468,14 +468,14 @@ nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
if (cmd->opc == SPDK_NVME_OPC_READ) {
spdk_trace_record(TRACE_NVMF_LIB_READ_START, 0, 0, (uint64_t)req, 0);
if (spdk_bdev_read(desc, ch, req->data, offset, req->length, nvmf_virtual_ctrlr_complete_cmd,
if (spdk_bdev_read(desc, ch, req->data, offset, req->length, nvmf_bdev_ctrlr_complete_cmd,
req)) {
response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
} else {
spdk_trace_record(TRACE_NVMF_LIB_WRITE_START, 0, 0, (uint64_t)req, 0);
if (spdk_bdev_write(desc, ch, req->data, offset, req->length, nvmf_virtual_ctrlr_complete_cmd,
if (spdk_bdev_write(desc, ch, req->data, offset, req->length, nvmf_bdev_ctrlr_complete_cmd,
req)) {
response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
@ -486,14 +486,14 @@ nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
}
static int
nvmf_virtual_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
uint64_t nbytes;
struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
nbytes = spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev);
if (spdk_bdev_flush(desc, ch, 0, nbytes, nvmf_virtual_ctrlr_complete_cmd, req)) {
if (spdk_bdev_flush(desc, ch, 0, nbytes, nvmf_bdev_ctrlr_complete_cmd, req)) {
response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
@ -501,7 +501,7 @@ nvmf_virtual_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc
}
static int
nvmf_virtual_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
int i;
@ -533,7 +533,7 @@ nvmf_virtual_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
to_be64(&unmap[i].lba, dsm_range[i].starting_lba);
to_be32(&unmap[i].block_count, dsm_range[i].length);
}
if (spdk_bdev_unmap(desc, ch, unmap, nr, nvmf_virtual_ctrlr_complete_cmd, req)) {
if (spdk_bdev_unmap(desc, ch, unmap, nr, nvmf_bdev_ctrlr_complete_cmd, req)) {
free(unmap);
response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
@ -549,11 +549,11 @@ nvmf_virtual_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
}
static int
nvmf_virtual_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
if (spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
nvmf_virtual_ctrlr_complete_cmd, req)) {
nvmf_bdev_ctrlr_complete_cmd, req)) {
req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
@ -563,7 +563,7 @@ nvmf_virtual_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_des
}
static int
nvmf_virtual_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
nvmf_bdev_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
uint32_t nsid;
struct spdk_bdev *bdev;
@ -577,78 +577,78 @@ nvmf_virtual_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
response->status.sc = SPDK_NVME_SC_SUCCESS;
nsid = cmd->nsid;
if (nsid > subsystem->dev.virt.max_nsid || nsid == 0) {
if (nsid > subsystem->dev.max_nsid || nsid == 0) {
SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
bdev = subsystem->dev.virt.ns_list[nsid - 1];
bdev = subsystem->dev.ns_list[nsid - 1];
if (bdev == NULL) {
response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
desc = subsystem->dev.virt.desc[nsid - 1];
ch = subsystem->dev.virt.ch[nsid - 1];
desc = subsystem->dev.desc[nsid - 1];
ch = subsystem->dev.ch[nsid - 1];
switch (cmd->opc) {
case SPDK_NVME_OPC_READ:
case SPDK_NVME_OPC_WRITE:
return nvmf_virtual_ctrlr_rw_cmd(bdev, desc, ch, req);
return nvmf_bdev_ctrlr_rw_cmd(bdev, desc, ch, req);
case SPDK_NVME_OPC_FLUSH:
return nvmf_virtual_ctrlr_flush_cmd(bdev, desc, ch, req);
return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
case SPDK_NVME_OPC_DATASET_MANAGEMENT:
return nvmf_virtual_ctrlr_dsm_cmd(bdev, desc, ch, req);
return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req);
default:
return nvmf_virtual_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
}
}
static int
nvmf_virtual_ctrlr_attach(struct spdk_nvmf_subsystem *subsystem)
nvmf_bdev_ctrlr_attach(struct spdk_nvmf_subsystem *subsystem)
{
struct spdk_bdev *bdev;
struct spdk_io_channel *ch;
uint32_t i;
for (i = 0; i < subsystem->dev.virt.max_nsid; i++) {
bdev = subsystem->dev.virt.ns_list[i];
for (i = 0; i < subsystem->dev.max_nsid; i++) {
bdev = subsystem->dev.ns_list[i];
if (bdev == NULL) {
continue;
}
ch = spdk_bdev_get_io_channel(subsystem->dev.virt.desc[i]);
ch = spdk_bdev_get_io_channel(subsystem->dev.desc[i]);
if (ch == NULL) {
SPDK_ERRLOG("io_channel allocation failed\n");
return -1;
}
subsystem->dev.virt.ch[i] = ch;
subsystem->dev.ch[i] = ch;
}
return 0;
}
static void
nvmf_virtual_ctrlr_detach(struct spdk_nvmf_subsystem *subsystem)
nvmf_bdev_ctrlr_detach(struct spdk_nvmf_subsystem *subsystem)
{
uint32_t i;
for (i = 0; i < subsystem->dev.virt.max_nsid; i++) {
if (subsystem->dev.virt.ns_list[i]) {
spdk_put_io_channel(subsystem->dev.virt.ch[i]);
spdk_bdev_close(subsystem->dev.virt.desc[i]);
subsystem->dev.virt.ch[i] = NULL;
subsystem->dev.virt.ns_list[i] = NULL;
for (i = 0; i < subsystem->dev.max_nsid; i++) {
if (subsystem->dev.ns_list[i]) {
spdk_put_io_channel(subsystem->dev.ch[i]);
spdk_bdev_close(subsystem->dev.desc[i]);
subsystem->dev.ch[i] = NULL;
subsystem->dev.ns_list[i] = NULL;
}
}
subsystem->dev.virt.max_nsid = 0;
subsystem->dev.max_nsid = 0;
}
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_virtual_ctrlr_ops = {
.attach = nvmf_virtual_ctrlr_attach,
.ctrlr_get_data = nvmf_virtual_ctrlr_get_data,
.process_admin_cmd = nvmf_virtual_ctrlr_process_admin_cmd,
.process_io_cmd = nvmf_virtual_ctrlr_process_io_cmd,
.poll_for_completions = nvmf_virtual_ctrlr_poll_for_completions,
.detach = nvmf_virtual_ctrlr_detach,
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_bdev_ctrlr_ops = {
.attach = nvmf_bdev_ctrlr_attach,
.ctrlr_get_data = nvmf_bdev_ctrlr_get_data,
.process_admin_cmd = nvmf_bdev_ctrlr_process_admin_cmd,
.process_io_cmd = nvmf_bdev_ctrlr_process_io_cmd,
.poll_for_completions = nvmf_bdev_ctrlr_poll_for_completions,
.detach = nvmf_bdev_ctrlr_detach,
};

View File

@ -1,326 +0,0 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "subsystem.h"
#include "session.h"
#include "request.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/event.h"
#include "spdk_internal/log.h"
/*
 * Snapshot the physical controller's identify-controller data into the
 * session's virtualized controller data.
 */
static void
nvmf_direct_ctrlr_get_data(struct spdk_nvmf_session *session)
{
	memcpy(&session->vcdata,
	       spdk_nvme_ctrlr_get_data(session->subsys->dev.direct.ctrlr),
	       sizeof(struct spdk_nvme_ctrlr_data));
}
/* Poller entry point: drain completed admin commands on the physical controller. */
static void
nvmf_direct_ctrlr_poll_for_admin_completions(void *arg)
{
	struct spdk_nvmf_subsystem *subsystem = arg;

	spdk_nvme_ctrlr_process_admin_completions(subsystem->dev.direct.ctrlr);
}
/*
 * Per-poll hook for direct mode: process admin completions when commands are
 * outstanding, lazily register a periodic admin poller on first use, and
 * drain the subsystem's dedicated I/O qpair.
 */
static void
nvmf_direct_ctrlr_poll_for_completions(struct spdk_nvmf_subsystem *subsystem)
{
	if (subsystem->dev.direct.outstanding_admin_cmd_count > 0) {
		nvmf_direct_ctrlr_poll_for_admin_completions(subsystem);
	}

	/* Register the admin poller once, on the core this is first polled from. */
	if (subsystem->dev.direct.admin_poller == NULL) {
		int lcore = spdk_env_get_current_core();

		/* 10000 microsecond period */
		spdk_poller_register(&subsystem->dev.direct.admin_poller,
				     nvmf_direct_ctrlr_poll_for_admin_completions,
				     subsystem, lcore, 10000);
	}

	/* 0 = process all available I/O completions. */
	spdk_nvme_qpair_process_completions(subsystem->dev.direct.io_qpair, 0);
}
/*
 * Generic completion callback for commands passed through to the physical
 * controller: record a trace event, copy the NVMe completion entry into the
 * NVMf response capsule, and complete the fabrics request.
 */
static void
nvmf_direct_ctrlr_complete_cmd(void *ctx, const struct spdk_nvme_cpl *cmp)
{
	struct spdk_nvmf_request *req = ctx;

	spdk_trace_record(TRACE_NVMF_LIB_COMPLETE, 0, 0, (uint64_t)req, 0);
	req->rsp->nvme_cpl = *cmp;
	spdk_nvmf_request_complete(req);
}
/*
 * Admin-command completion: decrement the outstanding-admin counter (which
 * the poll loop uses to decide whether admin completions need processing),
 * then complete the request like any other passthrough command.
 */
static void
nvmf_direct_ctrlr_complete_admin_cmd(void *ctx, const struct spdk_nvme_cpl *cmp)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_subsystem *subsystem = req->conn->sess->subsys;

	subsystem->dev.direct.outstanding_admin_cmd_count--;
	nvmf_direct_ctrlr_complete_cmd(ctx, cmp);
}
/*
 * Build an Identify Active Namespace List response from the physical
 * controller's active namespaces, starting above the NSID in the command.
 * Returns -1 when the requested starting NSID is in the reserved range.
 */
static int
nvmf_direct_ctrlr_admin_identify_nslist(struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint32_t start_nsid = cmd->nsid;
	struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)req->data;
	uint32_t nsid, total_ns, entries = 0;

	/* NSIDs 0xFFFFFFFE and above are reserved and invalid as a starting point. */
	if (start_nsid >= 0xfffffffeUL) {
		return -1;
	}

	memset(req->data, 0, req->length);

	total_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
	for (nsid = 1; nsid <= total_ns; nsid++) {
		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

		/* Report only active namespaces with NSIDs above the requested start. */
		if (!spdk_nvme_ns_is_active(ns) || nsid <= start_nsid) {
			continue;
		}

		ns_list->ns_list[entries++] = nsid;
		if (entries == SPDK_COUNTOF(ns_list->ns_list)) {
			break;
		}
	}

	return 0;
}
/*
 * Handle an admin command for a direct-mode subsystem. Identify and the
 * fabrics-virtualized features (queue count, host identifier, keep-alive,
 * async event configuration) are serviced from the session's virtual
 * controller state; queue create/delete opcodes are rejected (invalid over
 * fabrics); everything else is passed through to the physical controller.
 * Returns an SPDK_NVMF_REQUEST_EXEC_STATUS_* value.
 */
static int
nvmf_direct_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem *subsystem = session->subsys;
	union spdk_nvme_vs_register vs;
	int rc = 0;
	uint8_t feature;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;

	switch (cmd->opc) {
	case SPDK_NVME_OPC_IDENTIFY:
		/* Identify payloads are always 4096 bytes. */
		if (req->data == NULL || req->length < 4096) {
			SPDK_ERRLOG("identify command with invalid buffer\n");
			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
		/* cdw10 low byte is the CNS value. */
		if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_CTRLR) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Identify Controller\n");
			/* pull from virtual controller context */
			memcpy(req->data, &session->vcdata, sizeof(struct spdk_nvme_ctrlr_data));
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
			/*
			 * Pre-1.1 NVMe controllers do not implement the active
			 * namespace list; emulate it here. 1.1+ controllers
			 * handle it via passthrough below.
			 */
			vs = spdk_nvme_ctrlr_get_regs_vs(subsystem->dev.direct.ctrlr);
			if (vs.raw < SPDK_NVME_VERSION(1, 1, 0)) {
				/* fill in identify ns list with virtual controller information */
				rc = nvmf_direct_ctrlr_admin_identify_nslist(subsystem->dev.direct.ctrlr, req);
				if (rc < 0) {
					SPDK_ERRLOG("Invalid Namespace or Format\n");
					response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
				}
				return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
			}
		}
		goto passthrough;

	case SPDK_NVME_OPC_GET_FEATURES:
		feature = cmd->cdw10 & 0xff; /* mask out the FID value */
		switch (feature) {
		case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
			return spdk_nvmf_session_get_features_number_of_queues(req);
		case SPDK_NVME_FEAT_HOST_IDENTIFIER:
			return spdk_nvmf_session_get_features_host_identifier(req);
		case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
			return spdk_nvmf_session_get_features_keep_alive_timer(req);
		case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
			return spdk_nvmf_session_get_features_async_event_configuration(req);
		default:
			/* Any other feature goes straight to the device. */
			goto passthrough;
		}
		break;
	case SPDK_NVME_OPC_SET_FEATURES:
		feature = cmd->cdw10 & 0xff; /* mask out the FID value */
		switch (feature) {
		case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
			return spdk_nvmf_session_set_features_number_of_queues(req);
		case SPDK_NVME_FEAT_HOST_IDENTIFIER:
			return spdk_nvmf_session_set_features_host_identifier(req);
		case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
			return spdk_nvmf_session_set_features_keep_alive_timer(req);
		case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
			return spdk_nvmf_session_set_features_async_event_configuration(req);
		default:
			/* Any other feature goes straight to the device. */
			goto passthrough;
		}
		break;
	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
		return spdk_nvmf_session_async_event_request(req);
	case SPDK_NVME_OPC_KEEP_ALIVE:
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Keep Alive\n");
		/*
		 * To handle keep alive just clear or reset the
		 * session based keep alive duration counter.
		 * When added, a separate timer based process
		 * will monitor if the time since last recorded
		 * keep alive has exceeded the max duration and
		 * take appropriate action.
		 */
		//session->keep_alive_timestamp = ;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;

	case SPDK_NVME_OPC_CREATE_IO_SQ:
	case SPDK_NVME_OPC_CREATE_IO_CQ:
	case SPDK_NVME_OPC_DELETE_IO_SQ:
	case SPDK_NVME_OPC_DELETE_IO_CQ:
		/* Queue management is meaningless over fabrics transports. */
		SPDK_ERRLOG("Admin opc 0x%02X not allowed in NVMf\n", cmd->opc);
		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;

	default:
passthrough:
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "admin_cmd passthrough: opc 0x%02x\n", cmd->opc);
		rc = spdk_nvme_ctrlr_cmd_admin_raw(subsystem->dev.direct.ctrlr,
						   cmd,
						   req->data, req->length,
						   nvmf_direct_ctrlr_complete_admin_cmd,
						   req);
		if (rc) {
			SPDK_ERRLOG("Error submitting admin opc 0x%02x\n", cmd->opc);
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
		/* Balanced by the decrement in nvmf_direct_ctrlr_complete_admin_cmd(). */
		subsystem->dev.direct.outstanding_admin_cmd_count++;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}
}
/*
 * Submit an I/O command directly to the physical controller's dedicated
 * qpair; completion is reported asynchronously via
 * nvmf_direct_ctrlr_complete_cmd(). Returns an
 * SPDK_NVMF_REQUEST_EXEC_STATUS_* value.
 */
static int
nvmf_direct_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_subsystem *subsystem = req->conn->sess->subsys;

	if (spdk_nvme_ctrlr_cmd_io_raw(subsystem->dev.direct.ctrlr,
				       subsystem->dev.direct.io_qpair,
				       &req->cmd->nvme_cmd,
				       req->data, req->length,
				       nvmf_direct_ctrlr_complete_cmd,
				       req) != 0) {
		SPDK_ERRLOG("Failed to submit request %p\n", req);
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
/*
 * Tear down the direct-mode controller: stop the admin poller if it was ever
 * registered, then detach from the underlying NVMe device.
 */
static void
nvmf_direct_ctrlr_detach(struct spdk_nvmf_subsystem *subsystem)
{
	if (subsystem->dev.direct.ctrlr == NULL) {
		return;
	}

	if (subsystem->dev.direct.admin_poller != NULL) {
		spdk_poller_unregister(&subsystem->dev.direct.admin_poller, NULL);
	}

	spdk_nvme_detach(subsystem->dev.direct.ctrlr);
}
/*
 * AER completion from the physical controller: forward the event to every
 * session that currently has an Async Event Request outstanding, clearing
 * each session's pending AER slot after completion.
 */
static void
nvmf_direct_ctrlr_complete_aer(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvmf_subsystem *subsystem = arg;
	struct spdk_nvmf_session *session;

	TAILQ_FOREACH(session, &subsystem->sessions, link) {
		if (session->aer_req == NULL) {
			continue;
		}
		nvmf_direct_ctrlr_complete_cmd(session->aer_req, cpl);
		session->aer_req = NULL;
	}
}
/*
 * Bring up the direct-mode controller: allocate the subsystem's dedicated
 * I/O qpair and hook device AER notifications into the fabrics layer.
 * Returns 0 on success, -1 on failure.
 */
static int
nvmf_direct_ctrlr_attach(struct spdk_nvmf_subsystem *subsystem)
{
	subsystem->dev.direct.io_qpair =
		spdk_nvme_ctrlr_alloc_io_qpair(subsystem->dev.direct.ctrlr, NULL, 0);
	if (subsystem->dev.direct.io_qpair == NULL) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
		return -1;
	}

	spdk_nvme_ctrlr_register_aer_callback(subsystem->dev.direct.ctrlr,
					      nvmf_direct_ctrlr_complete_aer,
					      subsystem);
	return 0;
}
/*
 * Ops table for direct-mode subsystems: admin and I/O commands are largely
 * passed through to a physical NVMe controller owned by the subsystem.
 */
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_direct_ctrlr_ops = {
	.attach = nvmf_direct_ctrlr_attach,
	.ctrlr_get_data = nvmf_direct_ctrlr_get_data,
	.process_admin_cmd = nvmf_direct_ctrlr_process_admin_cmd,
	.process_io_cmd = nvmf_direct_ctrlr_process_io_cmd,
	.poll_for_completions = nvmf_direct_ctrlr_poll_for_completions,
	.detach = nvmf_direct_ctrlr_detach,
};

View File

@ -640,7 +640,7 @@ spdk_nvmf_session_poll(struct spdk_nvmf_session *session)
struct spdk_nvmf_conn *conn, *tmp;
struct spdk_nvmf_subsystem *subsys = session->subsys;
if (subsys->is_removed && subsys->mode == NVMF_SUBSYSTEM_MODE_VIRTUAL) {
if (subsys->is_removed) {
if (session->aer_req) {
struct spdk_nvmf_request *aer = session->aer_req;

View File

@ -179,7 +179,6 @@ spdk_nvmf_valid_nqn(const char *nqn)
struct spdk_nvmf_subsystem *
spdk_nvmf_create_subsystem(const char *nqn,
enum spdk_nvmf_subtype type,
enum spdk_nvmf_subsystem_mode mode,
void *cb_ctx,
spdk_nvmf_subsystem_connect_fn connect_cb,
spdk_nvmf_subsystem_disconnect_fn disconnect_cb)
@ -199,7 +198,6 @@ spdk_nvmf_create_subsystem(const char *nqn,
subsystem->id = g_nvmf_tgt.current_subsystem_id;
subsystem->subtype = type;
subsystem->mode = mode;
subsystem->cb_ctx = cb_ctx;
subsystem->connect_cb = connect_cb;
subsystem->disconnect_cb = disconnect_cb;
@ -210,11 +208,8 @@ spdk_nvmf_create_subsystem(const char *nqn,
if (type == SPDK_NVMF_SUBTYPE_DISCOVERY) {
subsystem->ops = &spdk_nvmf_discovery_ctrlr_ops;
} else if (mode == NVMF_SUBSYSTEM_MODE_DIRECT) {
subsystem->ops = &spdk_nvmf_direct_ctrlr_ops;
subsystem->dev.direct.outstanding_admin_cmd_count = 0;
} else {
subsystem->ops = &spdk_nvmf_virtual_ctrlr_ops;
subsystem->ops = &spdk_nvmf_bdev_ctrlr_ops;
}
TAILQ_INSERT_TAIL(&g_nvmf_tgt.subsystems, subsystem, entries);
@ -369,16 +364,6 @@ spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *
return 0;
}
int
nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
struct spdk_nvme_ctrlr *ctrlr, const struct spdk_pci_addr *pci_addr)
{
subsystem->dev.direct.ctrlr = ctrlr;
subsystem->dev.direct.pci_addr = *pci_addr;
return 0;
}
static void spdk_nvmf_ctrlr_hot_remove(void *remove_ctx)
{
struct spdk_nvmf_subsystem *subsystem = (struct spdk_nvmf_subsystem *)remove_ctx;
@ -393,12 +378,10 @@ spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bd
uint32_t i;
int rc;
assert(subsystem->mode == NVMF_SUBSYSTEM_MODE_VIRTUAL);
if (nsid == 0) {
/* NSID not specified - find a free index */
for (i = 0; i < MAX_VIRTUAL_NAMESPACE; i++) {
if (subsystem->dev.virt.ns_list[i] == NULL) {
if (subsystem->dev.ns_list[i] == NULL) {
nsid = i + 1;
break;
}
@ -415,14 +398,14 @@ spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bd
return 0;
}
if (subsystem->dev.virt.ns_list[i]) {
if (subsystem->dev.ns_list[i]) {
SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", nsid);
return 0;
}
}
rc = spdk_bdev_open(bdev, true, spdk_nvmf_ctrlr_hot_remove, subsystem,
&subsystem->dev.virt.desc[i]);
&subsystem->dev.desc[i]);
if (rc != 0) {
SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
subsystem->subnqn, spdk_bdev_get_name(bdev), rc);
@ -434,19 +417,15 @@ spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bd
spdk_bdev_get_name(bdev),
nsid);
subsystem->dev.virt.ns_list[i] = bdev;
subsystem->dev.virt.max_nsid = spdk_max(subsystem->dev.virt.max_nsid, nsid);
subsystem->dev.ns_list[i] = bdev;
subsystem->dev.max_nsid = spdk_max(subsystem->dev.max_nsid, nsid);
return nsid;
}
const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
if (subsystem->mode != NVMF_SUBSYSTEM_MODE_VIRTUAL) {
return NULL;
}
return subsystem->dev.virt.sn;
return subsystem->dev.sn;
}
int
@ -454,11 +433,7 @@ spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn
{
size_t len, max_len;
if (subsystem->mode != NVMF_SUBSYSTEM_MODE_VIRTUAL) {
return -1;
}
max_len = sizeof(subsystem->dev.virt.sn) - 1;
max_len = sizeof(subsystem->dev.sn) - 1;
len = strlen(sn);
if (len > max_len) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Invalid sn \"%s\": length %zu > max %zu\n",
@ -466,7 +441,7 @@ spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn
return -1;
}
snprintf(subsystem->dev.virt.sn, sizeof(subsystem->dev.virt.sn), "%s", sn);
snprintf(subsystem->dev.sn, sizeof(subsystem->dev.sn), "%s", sn);
return 0;
}
@ -485,12 +460,3 @@ spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
{
return subsystem->subtype;
}
/* Workaround for astyle formatting bug */
typedef enum spdk_nvmf_subsystem_mode nvmf_mode_t;
nvmf_mode_t
spdk_nvmf_subsystem_get_mode(struct spdk_nvmf_subsystem *subsystem)
{
return subsystem->mode;
}

View File

@ -43,8 +43,7 @@ struct spdk_nvmf_subsystem *spdk_nvmf_find_subsystem_with_cntlid(uint16_t cntlid
void spdk_nvmf_get_discovery_log_page(void *buffer, uint64_t offset, uint32_t length);
extern const struct spdk_nvmf_ctrlr_ops spdk_nvmf_direct_ctrlr_ops;
extern const struct spdk_nvmf_ctrlr_ops spdk_nvmf_virtual_ctrlr_ops;
extern const struct spdk_nvmf_ctrlr_ops spdk_nvmf_bdev_ctrlr_ops;
extern const struct spdk_nvmf_ctrlr_ops spdk_nvmf_discovery_ctrlr_ops;
#endif /* SPDK_NVMF_SUBSYSTEM_H */

View File

@ -400,7 +400,6 @@ def construct_nvmf_subsystem(args):
params = {
'core': args.core,
'mode': args.mode,
'nqn': args.nqn,
'listen_addresses': listen_addresses,
'serial_number': args.serial_number,
@ -418,14 +417,10 @@ def construct_nvmf_subsystem(args):
namespaces.append(u)
params['namespaces'] = namespaces
if args.pci_address:
params['pci_address'] = args.pci_address
jsonrpc_call('construct_nvmf_subsystem', params)
p = subparsers.add_parser('construct_nvmf_subsystem', help='Add a nvmf subsystem')
p.add_argument("-c", "--core", help='The core Nvmf target run on', type=int, default=-1)
p.add_argument('mode', help='Target mode: Virtual or Direct')
p.add_argument('nqn', help='Target nqn(ASCII)')
p.add_argument('listen', help="""comma-separated list of Listen <trtype:transport_name traddr:address trsvcid:port_id> pairs enclosed
in quotes. Format: 'trtype:transport0 traddr:traddr0 trsvcid:trsvcid0,trtype:transport1 traddr:traddr1 trsvcid:trsvcid1' etc
@ -433,10 +428,7 @@ Example: 'trtype:RDMA traddr:192.168.100.8 trsvcid:4420,trtype:RDMA traddr:192.1
p.add_argument('hosts', help="""Whitespace-separated list of host nqn list.
Format: 'nqn1 nqn2' etc
Example: 'nqn.2016-06.io.spdk:init nqn.2016-07.io.spdk:init'""")
p.add_argument("-p", "--pci_address", help="""Valid if mode == Direct.
Format: 'domain:device:function' etc
Example: '0000:00:01.0'""")
p.add_argument("-s", "--serial_number", help="""Valid if mode == Virtual.
p.add_argument("-s", "--serial_number", help="""
Format: 'sn' etc
Example: 'SPDK00000000000001'""", default='0000:00:01.0')
p.add_argument("-n", "--namespaces", help="""Whitespace-separated list of namespaces.

View File

@ -29,7 +29,7 @@ trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid 5260
echo "NVMf target has started."
bdevs=$($rpc_py construct_malloc_bdev 64 512)
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
echo "NVMf subsystem created."
timing_enter start_iscsi_tgt

View File

@ -38,8 +38,7 @@ bdevs="$bdevs $($rpc_py construct_null_bdev Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SI
modprobe -v nvme-rdma
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "*"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode2 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
nvme discover -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
@ -47,7 +46,6 @@ echo "Perform nvmf subsystem discovery via RPC"
$rpc_py get_nvmf_subsystems
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
trap - SIGINT SIGTERM EXIT

View File

@ -33,11 +33,9 @@ bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SI
modprobe -v nvme-rdma
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "*"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode2 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode2" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
mkdir -p /mnt/device
@ -73,10 +71,8 @@ done
sync
nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
nvme disconnect -n "nqn.2016-06.io.spdk:cnode2" || true
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
trap - SIGINT SIGTERM EXIT

View File

@ -33,11 +33,9 @@ bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SI
modprobe -v nvme-rdma
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "*"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode2 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode2" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
$testdir/nvmf_fio.py 4096 1 write 1 verify
$testdir/nvmf_fio.py 4096 1 randwrite 1 verify
@ -46,10 +44,8 @@ $testdir/nvmf_fio.py 4096 128 randwrite 1 verify
sync
nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
nvme disconnect -n "nqn.2016-06.io.spdk:cnode2" || true
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
rm -f ./local-job0-0-verify.state
rm -f ./local-job1-1-verify.state

View File

@ -25,7 +25,8 @@ trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "*"
bdevs="$bdevs $($rpc_py construct_malloc_bdev 64 512)"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" '' -s SPDK00000000000001 -n "$bdevs"
$rootdir/test/lib/nvme/aer/aer -r "\
trtype:RDMA \

View File

@ -30,7 +30,8 @@ trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "*"
bdevs="$bdevs $($rpc_py construct_malloc_bdev 64 512)"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" '' -s SPDK00000000000001 -n "$bdevs"
PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin

View File

@ -30,7 +30,7 @@ timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" '' -s SPDK00000000000001 -n "$bdevs"
$rootdir/examples/nvme/identify/identify -r "\
trtype:RDMA \

View File

@ -30,13 +30,11 @@ timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode2 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "*"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" '' -s SPDK00000000000001 -n "$bdevs"
$rootdir/examples/nvme/perf/perf -q 128 -s 4096 -w randrw -M 50 -t 1 -r "trtype:RDMA adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420"
sync
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
trap - SIGINT SIGTERM EXIT

View File

@ -33,7 +33,7 @@ modprobe -v nvme-rdma
for i in `seq 1 11`
do
bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode${i} "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -s SPDK${i} -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode${i} "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -s SPDK${i} -n "$bdevs"
done
for i in `seq 1 11`; do

View File

@ -32,11 +32,9 @@ bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SI
modprobe -v nvme-rdma
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "*"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode2 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -s SPDK00000000000001 -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" '' -s SPDK00000000000001 -n "$bdevs"
nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode2" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
nvme list
@ -53,7 +51,6 @@ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
nvme disconnect -n "nqn.2016-06.io.spdk:cnode2" || true
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
trap - SIGINT SIGTERM EXIT

View File

@ -32,8 +32,10 @@ else
times=3
fi
# get all available nvme bdf info.
bdfs=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}')
MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
# do frequent add delete.
for i in `seq 1 $times`
@ -41,7 +43,7 @@ do
j=0
for bdf in $bdfs; do
let j=j+1
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode$j "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" "" -p "$bdf"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode$j "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:4420" '' -s SPDK00000000000001 -n "$bdevs"
done
n=$j

View File

@ -32,7 +32,7 @@ timing_exit start_nvmf_tgt
for i in `seq 1 10`
do
bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode${i} "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -s SPDK${i} -n "$bdevs"
$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode${i} "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -s SPDK${i} -n "$bdevs"
done
# Kill nvmf tgt without removing any subsystem to check whether it can shutdown correctly

View File

@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = direct.c discovery.c request.c session.c subsystem.c virtual.c
DIRS-y = request.c session.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
.PHONY: all clean $(DIRS-y)

View File

@ -0,0 +1 @@
ctrlr_bdev_ut

View File

@ -33,6 +33,6 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = virtual_ut.c
TEST_FILE = ctrlr_bdev_ut.c
include $(SPDK_ROOT_DIR)/mk/nvmf.unittest.mk

View File

@ -35,7 +35,7 @@
#include "spdk_cunit.h"
#include "virtual.c"
#include "ctrlr_bdev.c"
SPDK_LOG_REGISTER_TRACE_FLAG("nvmf", SPDK_TRACE_NVMF)
@ -219,7 +219,7 @@ void spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, int *sct,
}
static void
nvmf_test_nvmf_virtual_ctrlr_get_log_page(void)
nvmf_test_nvmf_bdev_ctrlr_get_log_page(void)
{
}
@ -239,7 +239,7 @@ int main(int argc, char **argv)
}
if (CU_add_test(suite, "virtual_ctrlr_get_log_page",
nvmf_test_nvmf_virtual_ctrlr_get_log_page) == NULL) {
nvmf_test_nvmf_bdev_ctrlr_get_log_page) == NULL) {
CU_cleanup_registry();
return CU_get_error();
}

View File

@ -0,0 +1 @@
ctrlr_discovery_ut

View File

@ -33,7 +33,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = discovery_ut.c
TEST_FILE = ctrlr_discovery_ut.c
OTHER_FILES = subsystem.c
include $(SPDK_ROOT_DIR)/mk/nvmf.unittest.mk

View File

@ -35,12 +35,11 @@
#include "spdk_cunit.h"
#include "discovery.c"
#include "ctrlr_discovery.c"
SPDK_LOG_REGISTER_TRACE_FLAG("nvmf", SPDK_TRACE_NVMF)
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_direct_ctrlr_ops;
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_virtual_ctrlr_ops;
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_bdev_ctrlr_ops;
struct spdk_nvmf_tgt g_nvmf_tgt = {
.subsystems = TAILQ_HEAD_INITIALIZER(g_nvmf_tgt.subsystems)
@ -225,7 +224,7 @@ test_discovery_log(void)
/* Add one subsystem and verify that the discovery log contains it */
subsystem = spdk_nvmf_create_subsystem("nqn.2016-06.io.spdk:subsystem1", SPDK_NVMF_SUBTYPE_NVME,
NVMF_SUBSYSTEM_MODE_DIRECT, NULL, NULL, NULL);
NULL, NULL, NULL);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
listen_addr = spdk_nvmf_tgt_listen("test_transport1", SPDK_NVMF_ADRFAM_IPV4, "1234", "5678");

View File

@ -1 +0,0 @@
direct_ut

View File

@ -1,38 +0,0 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = direct_ut.c
include $(SPDK_ROOT_DIR)/mk/nvmf.unittest.mk

View File

@ -1 +0,0 @@
discovery_ut

View File

@ -36,8 +36,7 @@
#include "spdk_cunit.h"
#include "subsystem.h"
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_direct_ctrlr_ops;
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_virtual_ctrlr_ops;
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_bdev_ctrlr_ops;
const struct spdk_nvmf_ctrlr_ops spdk_nvmf_discovery_ctrlr_ops;
#include "subsystem.c"
@ -197,9 +196,8 @@ static void
test_spdk_nvmf_subsystem_add_ns(void)
{
struct spdk_nvmf_subsystem subsystem = {
.mode = NVMF_SUBSYSTEM_MODE_VIRTUAL,
.dev.virt.max_nsid = 0,
.dev.virt.ns_list = {},
.dev.max_nsid = 0,
.dev.ns_list = {},
};
struct spdk_bdev bdev1 = {}, bdev2 = {};
uint32_t nsid;
@ -208,19 +206,19 @@ test_spdk_nvmf_subsystem_add_ns(void)
nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, 0);
/* NSID 1 is the first unused ID */
CU_ASSERT(nsid == 1);
CU_ASSERT(subsystem.dev.virt.max_nsid == 1);
CU_ASSERT(subsystem.dev.virt.ns_list[nsid - 1] == &bdev1);
CU_ASSERT(subsystem.dev.max_nsid == 1);
CU_ASSERT(subsystem.dev.ns_list[nsid - 1] == &bdev1);
/* Request a specific NSID */
nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, 5);
CU_ASSERT(nsid == 5);
CU_ASSERT(subsystem.dev.virt.max_nsid == 5);
CU_ASSERT(subsystem.dev.virt.ns_list[nsid - 1] == &bdev2);
CU_ASSERT(subsystem.dev.max_nsid == 5);
CU_ASSERT(subsystem.dev.ns_list[nsid - 1] == &bdev2);
/* Request an NSID that is already in use */
nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, 5);
CU_ASSERT(nsid == 0);
CU_ASSERT(subsystem.dev.virt.max_nsid == 5);
CU_ASSERT(subsystem.dev.max_nsid == 5);
}
static void
@ -232,7 +230,7 @@ nvmf_test_create_subsystem(void)
strncpy(nqn, "nqn.2016-06.io.spdk:subsystem1", sizeof(nqn));
subsystem = spdk_nvmf_create_subsystem(nqn, SPDK_NVMF_SUBTYPE_NVME,
NVMF_SUBSYSTEM_MODE_DIRECT, NULL, NULL, NULL);
NULL, NULL, NULL);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_delete_subsystem(subsystem);
@ -243,7 +241,7 @@ nvmf_test_create_subsystem(void)
nqn[223] = '\0';
CU_ASSERT(strlen(nqn) == 223);
subsystem = spdk_nvmf_create_subsystem(nqn, SPDK_NVMF_SUBTYPE_NVME,
NVMF_SUBSYSTEM_MODE_DIRECT, NULL, NULL, NULL);
NULL, NULL, NULL);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_delete_subsystem(subsystem);
@ -254,7 +252,7 @@ nvmf_test_create_subsystem(void)
nqn[224] = '\0';
CU_ASSERT(strlen(nqn) == 224);
subsystem = spdk_nvmf_create_subsystem(nqn, SPDK_NVMF_SUBTYPE_NVME,
NVMF_SUBSYSTEM_MODE_DIRECT, NULL, NULL, NULL);
NULL, NULL, NULL);
CU_ASSERT(subsystem == NULL);
}

View File

@ -1 +0,0 @@
virtual_ut

View File

@ -69,12 +69,11 @@ $valgrind test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut
$valgrind test/unit/lib/log/log.c/log_ut
$valgrind test/unit/lib/nvmf/discovery.c/discovery_ut
$valgrind test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut
$valgrind test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut
$valgrind test/unit/lib/nvmf/request.c/request_ut
$valgrind test/unit/lib/nvmf/session.c/session_ut
$valgrind test/unit/lib/nvmf/subsystem.c/subsystem_ut
$valgrind test/unit/lib/nvmf/direct.c/direct_ut
$valgrind test/unit/lib/nvmf/virtual.c/virtual_ut
$valgrind test/unit/lib/scsi/dev.c/dev_ut
$valgrind test/unit/lib/scsi/lun.c/lun_ut