/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.init_ctrlrs = TAILQ_HEAD_INITIALIZER(_g_nvme_driver.init_ctrlrs),
	.attached_ctrlrs = TAILQ_HEAD_INITIALIZER(_g_nvme_driver.attached_ctrlrs),
	.request_mempool = NULL,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

int32_t spdk_nvme_retry_count;

static struct spdk_nvme_ctrlr *
nvme_attach(void *devhandle)
{
	struct spdk_nvme_ctrlr *ctrlr;
	int status;
	uint64_t phys_addr = 0;

	ctrlr = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr),
			     64, &phys_addr);
	if (ctrlr == NULL) {
		SPDK_ERRLOG("could not allocate ctrlr\n");
		return NULL;
	}

	status = nvme_ctrlr_construct(ctrlr, devhandle);
	if (status != 0) {
		spdk_free(ctrlr);
		return NULL;
	}

	return ctrlr;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	pthread_mutex_lock(&g_spdk_nvme_driver->lock);

	nvme_ctrlr_destruct(ctrlr);
	TAILQ_REMOVE(&g_spdk_nvme_driver->attached_ctrlrs, ctrlr, tailq);
	spdk_free(ctrlr);

	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
	return 0;
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status *status = arg;

	/*
	 * Copy the completion into the status structure provided by the
	 * caller, so that the caller can check whether the request
	 * succeeded or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	status->done = true;
}
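
/*
 * Illustrative sketch, not compiled here: the polling pattern a caller of
 * nvme_completion_poll_cb() typically follows. The admin-queue accessor and
 * the exact submit call below are assumptions made for illustration only.
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = false;
 *	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0,
 *					   nvme_completion_poll_cb, &status);
 *	while (!status.done) {
 *		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
 *	}
 *	if (spdk_nvme_cpl_is_error(&status.cpl)) {
 *		handle_failure();
 *	}
 */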

struct nvme_request *
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req = NULL;

	req = spdk_mempool_get(g_spdk_nvme_driver->request_mempool);
	if (req == NULL) {
		return req;
	}

	/*
	 * Only memset up to (but not including) the children
	 * TAILQ_ENTRY.  children, and following members, are
	 * only used as part of I/O splitting so we avoid
	 * memsetting them until it is actually needed.
	 * They will be initialized in nvme_request_add_child()
	 * if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, children));
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->payload = *payload;
	req->payload_size = payload_size;

	return req;
}
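
/*
 * A note on the partial memset above, as a sketch: offsetof() yields the
 * byte offset of the children member within struct nvme_request, so the
 * memset zeroes every member declared before children (cb_fn, cb_arg,
 * payload, etc.) and deliberately leaves children and everything after it
 * untouched. For example, given a hypothetical layout:
 *
 *	struct example {
 *		int a;		(offset 0 - zeroed)
 *		int b;		(offset 4 - zeroed)
 *		int children;	(offset 8 - left stale)
 *	};
 *
 *	memset(p, 0, offsetof(struct example, children));  zeroes 8 bytes
 */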

struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
			     void *cb_arg)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	payload.md = NULL;

	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
}

struct nvme_request *
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}
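
/*
 * Illustrative internal usage, a sketch only (the submit helper and the
 * command fields below are assumptions, not taken from this file): a
 * command that carries no data payload can be built from
 * nvme_allocate_request_null().
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_null(cb_fn, cb_arg);
 *	if (req == NULL) {
 *		return ENOMEM;
 *	}
 *	req->cmd.opc = SPDK_NVME_OPC_FLUSH;
 *	req->cmd.nsid = ns->id;
 *	nvme_qpair_submit_request(qpair, req);
 */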

static void
nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = arg;
	enum spdk_nvme_data_transfer xfer;

	if (req->user_buffer && req->payload_size) {
		/* Copy back to the user buffer and free the contig buffer */
		assert(req->payload.type == NVME_PAYLOAD_TYPE_CONTIG);
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
		if (xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST ||
		    xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
			memcpy(req->user_buffer, req->payload.u.contig, req->payload_size);
		}

		spdk_free(req->payload.u.contig);
	}

	/* Call the user's original callback now that the buffer has been copied */
	req->user_cb_fn(req->user_cb_arg, cpl);
}

/**
 * Allocate a request as well as a physically contiguous buffer to copy to/from the user's buffer.
 *
 * This is intended for use in non-fast-path functions (admin commands, reservations, etc.)
 * where the overhead of a copy is not a problem.
 */
struct nvme_request *
nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				void *cb_arg, bool host_to_controller)
{
	struct nvme_request *req;
	void *contig_buffer = NULL;
	uint64_t phys_addr;

	if (buffer && payload_size) {
		contig_buffer = spdk_zmalloc(payload_size, 4096, &phys_addr);
		if (!contig_buffer) {
			return NULL;
		}

		if (host_to_controller) {
			memcpy(contig_buffer, buffer, payload_size);
		}
	}

	req = nvme_allocate_request_contig(contig_buffer, payload_size, nvme_user_copy_cmd_complete, NULL);
	if (!req) {
		/* Free the bounce buffer we allocated, not the caller's buffer. */
		spdk_free(contig_buffer);
		return NULL;
	}

	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;
	req->user_buffer = buffer;
	req->cb_arg = req;

	return req;
}
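
/*
 * Illustrative caller pattern, a sketch under assumed names (log_page_buf,
 * buf_len, and the submit step are hypothetical): for a controller-to-host
 * transfer, pass host_to_controller = false; the data lands in the bounce
 * buffer first and is copied back to the caller's buffer by
 * nvme_user_copy_cmd_complete().
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_user_copy(log_page_buf, buf_len,
 *					      user_cb_fn, user_cb_arg,
 *					      false);
 *	if (req == NULL) {
 *		return ENOMEM;
 *	}
 *	then fill in req->cmd and submit on the admin queue
 */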

void
nvme_free_request(struct nvme_request *req)
{
	assert(req != NULL);
	assert(req->num_children == 0);

	spdk_mempool_put(g_spdk_nvme_driver->request_mempool, req);
}

int
nvme_mutex_init_shared(pthread_mutex_t *mtx)
{
	pthread_mutexattr_t attr;
	int rc = 0;

	if (pthread_mutexattr_init(&attr)) {
		return -1;
	}
	if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
	    pthread_mutex_init(mtx, &attr)) {
		rc = -1;
	}
	pthread_mutexattr_destroy(&attr);
	return rc;
}
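
/*
 * A minimal usage sketch (POSIX shared memory shown for illustration; not
 * part of this driver): PTHREAD_PROCESS_SHARED only has an effect when the
 * mutex itself lives in memory mapped into every participating process.
 *
 *	pthread_mutex_t *mtx = mmap(NULL, sizeof(*mtx),
 *				    PROT_READ | PROT_WRITE,
 *				    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *	if (mtx == MAP_FAILED || nvme_mutex_init_shared(mtx)) {
 *		handle_error();
 *	}
 *	pthread_mutex_lock(mtx);	now usable across fork()ed processes
 *	pthread_mutex_unlock(mtx);
 */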

struct nvme_enum_ctx {
	spdk_nvme_probe_cb probe_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_spdk_nvme_driver->lock */
static int
nvme_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct nvme_enum_ctx *enum_ctx = ctx;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts;

	/* Verify that this controller is not already attached */
	TAILQ_FOREACH(ctrlr, &g_spdk_nvme_driver->attached_ctrlrs, tailq) {
		/* NOTE: This assumes that the PCI abstraction layer will use the same
		 * device handle across enumerations; we could compare by BDF instead
		 * if this is not true.
		 */
		if (pci_dev == ctrlr->devhandle) {
			return 0;
		}
	}

	spdk_nvme_ctrlr_opts_set_defaults(&opts);

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev, &opts)) {
		ctrlr = nvme_attach(pci_dev);
		if (ctrlr == NULL) {
			SPDK_ERRLOG("nvme_attach() failed\n");
			return -1;
		}

		ctrlr->opts = opts;

		TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->init_ctrlrs, ctrlr, tailq);
	}

	return 0;
}

int
spdk_nvme_probe(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
		spdk_nvme_remove_cb remove_cb)
{
	int rc, start_rc;
	struct nvme_enum_ctx enum_ctx;
	struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;

	pthread_mutex_lock(&g_spdk_nvme_driver->lock);

	if (g_spdk_nvme_driver->request_mempool == NULL) {
		g_spdk_nvme_driver->request_mempool = spdk_mempool_create("nvme_request", 8192,
						      sizeof(struct nvme_request), -1);
		if (g_spdk_nvme_driver->request_mempool == NULL) {
			SPDK_ERRLOG("Unable to allocate pool of requests\n");
			pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
			return -1;
		}
	}

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = spdk_pci_enumerate(SPDK_PCI_DEVICE_NVME, nvme_enum_cb, &enum_ctx);
	/*
	 * Keep going even if one or more nvme_attach() calls failed,
	 * but maintain the value of rc to signal errors when we return.
	 */

	/* Initialize all new controllers in the init_ctrlrs list in parallel. */
	while (!TAILQ_EMPTY(&g_spdk_nvme_driver->init_ctrlrs)) {
		TAILQ_FOREACH_SAFE(ctrlr, &g_spdk_nvme_driver->init_ctrlrs, tailq, ctrlr_tmp) {
			/* Drop the driver lock while calling nvme_ctrlr_process_init()
			 * since it needs to acquire the driver lock internally when calling
			 * nvme_ctrlr_start().
			 *
			 * TODO: Rethink the locking - maybe reset should take the lock so
			 * that start() and the functions it calls (in particular
			 * nvme_ctrlr_set_num_qpairs()) can assume it is held.
			 */
			pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
			start_rc = nvme_ctrlr_process_init(ctrlr);
			pthread_mutex_lock(&g_spdk_nvme_driver->lock);

			if (start_rc) {
				/* Controller failed to initialize. */
				TAILQ_REMOVE(&g_spdk_nvme_driver->init_ctrlrs, ctrlr, tailq);
				nvme_ctrlr_destruct(ctrlr);
				spdk_free(ctrlr);
				rc = -1;
				break;
			}

			if (ctrlr->state == NVME_CTRLR_STATE_READY) {
				/*
				 * Controller has been initialized.
				 * Move it to the attached_ctrlrs list.
				 */
				TAILQ_REMOVE(&g_spdk_nvme_driver->init_ctrlrs, ctrlr, tailq);
				TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->attached_ctrlrs, ctrlr, tailq);

				/*
				 * Unlock while calling attach_cb() so the user can call
				 * other functions that may take the driver lock, like
				 * nvme_detach().
				 */
				pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
				attach_cb(cb_ctx, ctrlr->devhandle, ctrlr, &ctrlr->opts);
				pthread_mutex_lock(&g_spdk_nvme_driver->lock);

				break;
			}
		}
	}

	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
	return rc;
}
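
/*
 * Illustrative application usage, a sketch only (g_ctrlr is a hypothetical
 * global; error handling is elided): a minimal probe/attach callback pair.
 * Returning true from probe_cb claims the controller; attach_cb then
 * receives the initialized handle.
 *
 *	static bool
 *	probe_cb(void *cb_ctx, struct spdk_pci_device *dev,
 *		 struct spdk_nvme_ctrlr_opts *opts)
 *	{
 *		return true;
 *	}
 *
 *	static void
 *	attach_cb(void *cb_ctx, struct spdk_pci_device *dev,
 *		  struct spdk_nvme_ctrlr *ctrlr,
 *		  const struct spdk_nvme_ctrlr_opts *opts)
 *	{
 *		g_ctrlr = ctrlr;
 *	}
 *
 *	if (spdk_nvme_probe(NULL, probe_cb, attach_cb, NULL) != 0) {
 *		fprintf(stderr, "spdk_nvme_probe() failed\n");
 *	}
 */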

SPDK_LOG_REGISTER_TRACE_FLAG("nvme", SPDK_TRACE_NVME)