/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

#include "spdk/env.h"

static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_async_event_request *aer);

static int
nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
{
	return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
			&cc->raw);
}

static int
nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
{
	return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
			&csts->raw);
}

int
nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
{
	return ctrlr->transport->ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
			&cap->raw);
}

static int
nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
{
	return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
			&vs->raw);
}

static int
nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
{
	return ctrlr->transport->ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
			cc->raw);
}

void
spdk_nvme_ctrlr_opts_set_defaults(struct spdk_nvme_ctrlr_opts *opts)
{
	opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
	opts->use_cmb_sqs = false;
	opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
	opts->keep_alive_timeout_ms = 10 * 1000;
}

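/*
 * Usage sketch (illustrative; the surrounding probe/attach flow depends on
 * the application): fill in the defaults first, then override individual
 * fields before attaching to a controller.
 *
 *	struct spdk_nvme_ctrlr_opts opts;
 *
 *	spdk_nvme_ctrlr_opts_set_defaults(&opts);
 *	opts.num_io_queues = 4;
 *	opts.keep_alive_timeout_ms = 0;
 */
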
struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       enum spdk_nvme_qprio qprio)
{
	uint32_t qid;
	struct spdk_nvme_qpair *qpair;
	union spdk_nvme_cc_register cc;

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cc failed\n");
		return NULL;
	}

	/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
	if ((qprio & 3) != qprio) {
		return NULL;
	}

	/*
	 * Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
	 * default round robin arbitration method.
	 */
	if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (qprio != SPDK_NVME_QPRIO_URGENT)) {
		SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
		return NULL;
	}

	pthread_mutex_lock(&ctrlr->ctrlr_lock);

	/*
	 * Get the first available I/O queue ID.
	 */
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "No free I/O queue IDs\n");
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	qpair = ctrlr->transport->ctrlr_create_io_qpair(ctrlr, qid, qprio);
	if (qpair == NULL) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "transport->ctrlr_create_io_qpair() failed\n");
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}
	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);

	return qpair;
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	if (qpair == NULL) {
		return 0;
	}

	ctrlr = qpair->ctrlr;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);

	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);

	if (ctrlr->transport->ctrlr_delete_io_qpair(ctrlr, qpair)) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		return -1;
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	return 0;
}

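/*
 * Usage sketch (illustrative; in the SPDK model a qpair is used from one
 * thread of execution at a time):
 *
 *	struct spdk_nvme_qpair *qpair;
 *
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, SPDK_NVME_QPRIO_URGENT);
 *	if (qpair == NULL) {
 *		... handle allocation failure ...
 *	}
 *	... submit and poll I/O on qpair ...
 *	spdk_nvme_ctrlr_free_io_qpair(qpair);
 */
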
static void
nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_intel_log_page_directory *log_page_directory)
{
	struct pci_id pci_id;

	if (log_page_directory == NULL) {
		return;
	}

	if (ctrlr->transport->ctrlr_get_pci_id(ctrlr, &pci_id)) {
		return;
	}

	if (pci_id.vendor_id != SPDK_PCI_VID_INTEL) {
		return;
	}

	ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;

	if (log_page_directory->read_latency_log_len ||
	    (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
	}
	if (log_page_directory->write_latency_log_len ||
	    (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
	}
	if (log_page_directory->temperature_statistics_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
	}
	if (log_page_directory->smart_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
	}
	if (log_page_directory->marketing_description_log_len) {
		ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
	}
}

static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
	uint64_t phys_addr = 0;
	struct nvme_completion_poll_status status;
	struct spdk_nvme_intel_log_page_directory *log_page_directory;

	log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
					  64, &phys_addr);
	if (log_page_directory == NULL) {
		SPDK_ERRLOG("could not allocate log_page_directory\n");
		return -ENXIO;
	}

	status.done = false;
	spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG,
					 log_page_directory, sizeof(struct spdk_nvme_intel_log_page_directory),
					 nvme_completion_poll_cb,
					 &status);
	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		spdk_free(log_page_directory);
		SPDK_ERRLOG("nvme_ctrlr_cmd_get_log_page failed!\n");
		return -ENXIO;
	}

	nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
	spdk_free(log_page_directory);
	return 0;
}

static void
nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
	/* Mandatory pages */
	ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
	ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
	ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
	if (ctrlr->cdata.lpa.celp) {
		ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
	}
	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
		nvme_ctrlr_set_intel_support_log_pages(ctrlr);
	}
}

static void
nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
	ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
}

static void
nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
	/* Mandatory features */
	ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
	ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
	/* Optional features */
	if (ctrlr->cdata.vwc.present) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
	}
	if (ctrlr->cdata.apsta.supported) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
	}
	if (ctrlr->cdata.hmpre) {
		ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
	}
	if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
		nvme_ctrlr_set_intel_supported_features(ctrlr);
	}
}

static void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_qpair *qpair;

	ctrlr->is_failed = true;
	nvme_qpair_fail(ctrlr->adminq);
	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
		nvme_qpair_fail(qpair);
	}
}

static void
nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register cc;
	union spdk_nvme_csts_register csts;
	int ms_waited = 0;

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cc() failed\n");
		return;
	}

	cc.bits.shn = SPDK_NVME_SHN_NORMAL;

	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "set_cc() failed\n");
		return;
	}

	/*
	 * The NVMe spec does not define a timeout period
	 * for shutdown notification, so we just pick
	 * 5 seconds as a reasonable amount of time to
	 * wait before proceeding.
	 */
	do {
		if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
			SPDK_TRACELOG(SPDK_TRACE_NVME, "get_csts() failed\n");
			return;
		}

		if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
			SPDK_TRACELOG(SPDK_TRACE_NVME, "shutdown complete\n");
			return;
		}

		nvme_delay(1000);
		ms_waited++;
	} while (ms_waited < 5000);

	SPDK_ERRLOG("did not shutdown within 5 seconds\n");
}

static int
nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register cc;
	int rc;

	rc = ctrlr->transport->ctrlr_enable(ctrlr);
	if (rc != 0) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "transport ctrlr_enable failed\n");
		return rc;
	}

	if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cc() failed\n");
		return -EIO;
	}

	if (cc.bits.en != 0) {
		SPDK_ERRLOG("%s called with CC.EN = 1\n", __func__);
		return -EINVAL;
	}

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* Page size is 2 ^ (12 + mps). */
	cc.bits.mps = nvme_u32log2(PAGE_SIZE) - 12;

	switch (ctrlr->opts.arb_mechanism) {
	case SPDK_NVME_CC_AMS_RR:
		break;
	case SPDK_NVME_CC_AMS_WRR:
		if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
			break;
		}
		return -EINVAL;
	case SPDK_NVME_CC_AMS_VS:
		if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
			break;
		}
		return -EINVAL;
	default:
		return -EINVAL;
	}

	cc.bits.ams = ctrlr->opts.arb_mechanism;

	if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "set_cc() failed\n");
		return -EIO;
	}

	return 0;
}

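/*
 * Note on the MPS encoding above: the memory page size programmed into
 * CC.MPS is a power-of-two exponent relative to 4 KiB, i.e. the page size
 * is 2^(12 + MPS).  For example, with PAGE_SIZE == 4096,
 * nvme_u32log2(4096) == 12, so CC.MPS is set to 0.
 */
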
static void
nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
		     uint64_t timeout_in_ms)
{
	ctrlr->state = state;
	if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
		ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
	} else {
		ctrlr->state_timeout_tsc = spdk_get_ticks() + (timeout_in_ms * spdk_get_ticks_hz()) / 1000;
	}
}

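/*
 * Worked example for the timeout conversion above (tick rate is an assumed
 * value for illustration): with spdk_get_ticks_hz() == 2,000,000,000 (a
 * 2 GHz TSC), a 500 ms timeout becomes 500 * 2,000,000,000 / 1000 =
 * 1,000,000,000 ticks past the current tick count.
 */
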
int
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc = 0;
	struct spdk_nvme_qpair *qpair;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);

	if (ctrlr->is_resetting || ctrlr->is_failed) {
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		return 0;
	}

	ctrlr->is_resetting = true;

	SPDK_NOTICELOG("resetting controller\n");

	/* Disable all queues before disabling the controller hardware. */
	nvme_qpair_disable(ctrlr->adminq);
	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
		nvme_qpair_disable(qpair);
	}

	/* Set the state back to INIT to cause a full hardware reset. */
	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);

	while (ctrlr->state != NVME_CTRLR_STATE_READY) {
		if (nvme_ctrlr_process_init(ctrlr) != 0) {
			SPDK_ERRLOG("%s: controller reinitialization failed\n", __func__);
			nvme_ctrlr_fail(ctrlr);
			rc = -1;
			break;
		}
	}

	if (!ctrlr->is_failed) {
		/* Reinitialize qpairs */
		TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
			if (ctrlr->transport->ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
				nvme_ctrlr_fail(ctrlr);
				rc = -1;
			}
		}
	}

	ctrlr->is_resetting = false;

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

static int
nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_completion_poll_status status;
	int rc;

	status.done = false;
	rc = nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
						nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("nvme_identify_controller failed!\n");
		return -ENXIO;
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	ctrlr->max_xfer_size = ctrlr->transport->ctrlr_get_max_xfer_size(ctrlr);
	SPDK_TRACELOG(SPDK_TRACE_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
	if (ctrlr->cdata.mdts > 0) {
		ctrlr->max_xfer_size = nvme_min(ctrlr->max_xfer_size,
						ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
		SPDK_TRACELOG(SPDK_TRACE_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
	}

	return 0;
}

static int
nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_completion_poll_status status;
	int cq_allocated, sq_allocated;
	int rc;
	uint32_t i;

	status.done = false;

	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
		ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
	} else if (ctrlr->opts.num_io_queues < 1) {
		SPDK_NOTICELOG("Requested num_io_queues 0, increasing to 1\n");
		ctrlr->opts.num_io_queues = 1;
	}

	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
					   nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("nvme_set_num_queues failed!\n");
		return -ENXIO;
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	ctrlr->opts.num_io_queues = nvme_min(sq_allocated, cq_allocated);

	ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
	if (ctrlr->free_io_qids == NULL) {
		return -ENOMEM;
	}

	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}

	return 0;
}

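/*
 * Worked example for the cdw0 decoding above: if the controller completes
 * Set Features (Number of Queues) with cdw0 == 0x00030003, then
 * sq_allocated = 0x0003 + 1 = 4 and cq_allocated = 0x0003 + 1 = 4, so up
 * to 4 I/O queue pairs can be created.
 */
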
static int
nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_completion_poll_status status;
	uint32_t keep_alive_interval_ms;
	int rc;

	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
		return 0;
	}

	if (ctrlr->cdata.kas == 0) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
		ctrlr->opts.keep_alive_timeout_ms = 0;
		return 0;
	}

	SPDK_TRACELOG(SPDK_TRACE_NVME, "Setting keep alive timeout feature to %u ms\n",
		      ctrlr->opts.keep_alive_timeout_ms);

	status.done = false;
	rc = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER,
					     ctrlr->opts.keep_alive_timeout_ms, 0, NULL, 0,
					     nvme_completion_poll_cb, &status);
	if (rc != 0) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Keep alive timeout Set Feature failed: %d\n", rc);
		ctrlr->opts.keep_alive_timeout_ms = 0;
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Keep alive timeout Set Feature failed: SC %x SCT %x\n",
			      status.cpl.status.sc, status.cpl.status.sct);
		ctrlr->opts.keep_alive_timeout_ms = 0;
		return -ENXIO;
	}

	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
	status.done = false;
	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
					     nvme_completion_poll_cb, &status);
	if (rc != 0) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Keep alive timeout Get Feature failed: %d\n", rc);
		ctrlr->opts.keep_alive_timeout_ms = 0;
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Keep alive timeout Get Feature failed: SC %x SCT %x\n",
			      status.cpl.status.sc, status.cpl.status.sct);
		ctrlr->opts.keep_alive_timeout_ms = 0;
		return -ENXIO;
	}

	if (ctrlr->opts.keep_alive_timeout_ms != status.cpl.cdw0) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Controller adjusted keep alive timeout to %u ms\n",
			      status.cpl.cdw0);
	}

	ctrlr->opts.keep_alive_timeout_ms = status.cpl.cdw0;

	keep_alive_interval_ms = ctrlr->opts.keep_alive_timeout_ms / 2;
	if (keep_alive_interval_ms == 0) {
		keep_alive_interval_ms = 1;
	}
	SPDK_TRACELOG(SPDK_TRACE_NVME, "Sending keep alive every %u ms\n", keep_alive_interval_ms);

	/* Convert milliseconds to ticks: ticks = ms * ticks_per_second / 1000. */
	ctrlr->keep_alive_interval_ticks = (keep_alive_interval_ms * spdk_get_ticks_hz()) / UINT64_C(1000);

	/* Schedule the first Keep Alive to be sent as soon as possible. */
	ctrlr->next_keep_alive_tick = spdk_get_ticks();

	return 0;
}

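/*
 * Example of the interval calculation above (assuming the controller does
 * not adjust the requested timeout): with the default keep_alive_timeout_ms
 * of 10000, a Keep Alive command is scheduled every 5000 ms, half the
 * timeout, so a single delayed poll does not expire the timer.
 */
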
static void
nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->ns) {
		uint32_t i, num_ns = ctrlr->num_ns;

		for (i = 0; i < num_ns; i++) {
			nvme_ns_destruct(&ctrlr->ns[i]);
		}

		spdk_free(ctrlr->ns);
		ctrlr->ns = NULL;
		ctrlr->num_ns = 0;
	}

	if (ctrlr->nsdata) {
		spdk_free(ctrlr->nsdata);
		ctrlr->nsdata = NULL;
	}
}

static int
nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t i, nn = ctrlr->cdata.nn;
	uint64_t phys_addr = 0;

	if (nn == 0) {
		SPDK_ERRLOG("controller has 0 namespaces\n");
		return -1;
	}

	/* ctrlr->num_ns may be 0 (startup) or a different number of namespaces (reset),
	 * so check if we need to reallocate.
	 */
	if (nn != ctrlr->num_ns) {
		nvme_ctrlr_destruct_namespaces(ctrlr);

		ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64,
					 &phys_addr);
		if (ctrlr->ns == NULL) {
			goto fail;
		}

		ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
					     &phys_addr);
		if (ctrlr->nsdata == NULL) {
			goto fail;
		}

		ctrlr->num_ns = nn;
	}

	for (i = 0; i < nn; i++) {
		struct spdk_nvme_ns *ns = &ctrlr->ns[i];
		uint32_t nsid = i + 1;

		if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
			goto fail;
		}
	}

	return 0;

fail:
	nvme_ctrlr_destruct_namespaces(ctrlr);
	return -1;
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_async_event_request *aer = arg;
	struct spdk_nvme_ctrlr *ctrlr = aer->ctrlr;

	if (cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
		/*
		 * This is simulated when controller is being shut down, to
		 * effectively abort outstanding asynchronous event requests
		 * and make sure all memory is freed. Do not repost the
		 * request in this case.
		 */
		return;
	}

	if (ctrlr->aer_cb_fn != NULL) {
		ctrlr->aer_cb_fn(ctrlr->aer_cb_arg, cpl);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
		/*
		 * We can't do anything to recover from a failure here,
		 * so just print a warning message and leave the AER unsubmitted.
		 */
		SPDK_ERRLOG("resubmitting AER failed!\n");
	}
}

static int
nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
				    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;
	if (req == NULL) {
		return -1;
	}

	req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static int
nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_critical_warning_state state;
	struct nvme_async_event_request *aer;
	uint32_t i;
	struct nvme_completion_poll_status status;
	int rc;

	status.done = false;

	state.raw = 0xFF;
	state.bits.reserved = 0;
	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("nvme_ctrlr_cmd_set_async_event_config failed!\n");
		return -ENXIO;
	}

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = nvme_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
			SPDK_ERRLOG("nvme_ctrlr_construct_and_submit_aer failed!\n");
			return -1;
		}
	}

	return 0;
}

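/*
 * Summary of the initialization state machine implemented in
 * nvme_ctrlr_process_init() below, derived from the transitions in the code:
 *
 *   INIT (CC.EN=1, CSTS.RDY=0) -> DISABLE_WAIT_FOR_READY_1
 *   INIT (CC.EN=1, CSTS.RDY=1) -> clear CC.EN -> DISABLE_WAIT_FOR_READY_0
 *   INIT (CC.EN=0, CSTS.RDY=1) -> DISABLE_WAIT_FOR_READY_0
 *   INIT (CC.EN=0, CSTS.RDY=0) -> nvme_ctrlr_enable() -> ENABLE_WAIT_FOR_READY_1
 *   DISABLE_WAIT_FOR_READY_1 (RDY=1) -> clear CC.EN -> DISABLE_WAIT_FOR_READY_0
 *   DISABLE_WAIT_FOR_READY_0 (RDY=0) -> nvme_ctrlr_enable() -> ENABLE_WAIT_FOR_READY_1
 *   ENABLE_WAIT_FOR_READY_1 (RDY=1) -> nvme_ctrlr_start() -> READY
 */
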
/**
 * This function will be called repeatedly during initialization until the controller is ready.
 */
int
nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_cc_register cc;
	union spdk_nvme_csts_register csts;
	uint32_t ready_timeout_in_ms;
	int rc;

	/*
	 * May need to avoid accessing any register on the target controller
	 * for a while. Return early without touching the FSM.
	 * Check sleep_timeout_tsc > 0 for unit test.
	 */
	if ((ctrlr->sleep_timeout_tsc > 0) &&
	    (spdk_get_ticks() <= ctrlr->sleep_timeout_tsc)) {
		return 0;
	}
	ctrlr->sleep_timeout_tsc = 0;

	if (nvme_ctrlr_get_cc(ctrlr, &cc) ||
	    nvme_ctrlr_get_csts(ctrlr, &csts)) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "get registers failed\n");
		nvme_ctrlr_fail(ctrlr);
		return -EIO;
	}

	ready_timeout_in_ms = 500 * ctrlr->cap.bits.to;

	/*
	 * Check if the current initialization step is done or has timed out.
	 */
	switch (ctrlr->state) {
	case NVME_CTRLR_STATE_INIT:
		/* Begin the hardware initialization by making sure the controller is disabled. */
		if (cc.bits.en) {
			/*
			 * Controller is currently enabled. We need to disable it to cause a reset.
			 *
			 * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
			 * Wait for the ready bit to be 1 before disabling the controller.
			 */
			if (csts.bits.rdy == 0) {
				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
				return 0;
			}

			/* CC.EN = 1 && CSTS.RDY == 1, so we can immediately disable the controller. */
			cc.bits.en = 0;
			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
				SPDK_TRACELOG(SPDK_TRACE_NVME, "set_cc() failed\n");
				nvme_ctrlr_fail(ctrlr);
				return -EIO;
			}
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);

			/*
			 * Wait 2 seconds before accessing PCI registers.
			 * Not using sleep() to avoid blocking other controllers' initialization.
			 */
			if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
				ctrlr->sleep_timeout_tsc = spdk_get_ticks() + 2 * spdk_get_ticks_hz();
			}
			return 0;
		} else {
			if (csts.bits.rdy == 1) {
				/*
				 * Controller is in the process of shutting down.
				 * We need to wait for RDY to become 0.
				 */
				nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
				return 0;
			}

			/*
			 * Controller is currently disabled. We can jump straight to enabling it.
			 */
			rc = nvme_ctrlr_enable(ctrlr);
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
			return rc;
		}
		break;

	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
		if (csts.bits.rdy == 1) {
			/* CC.EN = 1 && CSTS.RDY = 1, so we can set CC.EN = 0 now. */
			cc.bits.en = 0;
			if (nvme_ctrlr_set_cc(ctrlr, &cc)) {
				SPDK_TRACELOG(SPDK_TRACE_NVME, "set_cc() failed\n");
				nvme_ctrlr_fail(ctrlr);
				return -EIO;
			}
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0, ready_timeout_in_ms);
			return 0;
		}
		break;

	case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
		if (csts.bits.rdy == 0) {
			/* CC.EN = 0 && CSTS.RDY = 0, so we can enable the controller now. */
			rc = nvme_ctrlr_enable(ctrlr);
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1, ready_timeout_in_ms);
			return rc;
		}
		break;

	case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
		if (csts.bits.rdy == 1) {
			/*
			 * The controller has been enabled.
			 * Perform the rest of initialization in nvme_ctrlr_start() serially.
			 */
			rc = nvme_ctrlr_start(ctrlr);
			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
			return rc;
		}
		break;

	default:
		assert(0);
		nvme_ctrlr_fail(ctrlr);
		return -1;
	}

	if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
	    spdk_get_ticks() > ctrlr->state_timeout_tsc) {
		SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
		nvme_ctrlr_fail(ctrlr);
		return -1;
	}

	return 0;
}

int
nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->transport->qpair_reset(ctrlr->adminq);

	nvme_qpair_enable(ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		return -1;
	}

	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
		return -1;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		return -1;
	}

	if (nvme_ctrlr_configure_aer(ctrlr) != 0) {
		return -1;
	}

	nvme_ctrlr_set_supported_log_pages(ctrlr);
	nvme_ctrlr_set_supported_features(ctrlr);

	if (ctrlr->cdata.sgls.supported) {
		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
	}

	if (nvme_ctrlr_set_keep_alive_timeout(ctrlr) != 0) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Setting keep alive timeout failed\n");
		return -1;
	}

	return 0;
}

int
nvme_mutex_init_recursive_shared(pthread_mutex_t *mtx)
{
	pthread_mutexattr_t attr;
	int rc = 0;

	if (pthread_mutexattr_init(&attr)) {
		return -1;
	}
	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
	    pthread_mutex_init(mtx, &attr)) {
		rc = -1;
	}
	pthread_mutexattr_destroy(&attr);
	return rc;
}

int
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
{
	struct pci_id pci_id;

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
	ctrlr->flags = 0;
	ctrlr->free_io_qids = NULL;

	ctrlr->min_page_size = 1 << (12 + ctrlr->cap.bits.mpsmin);

	ctrlr->is_resetting = false;
	ctrlr->is_failed = false;

	TAILQ_INIT(&ctrlr->active_io_qpairs);

	nvme_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);

	if (ctrlr->transport->ctrlr_get_pci_id(ctrlr, &pci_id) == 0) {
		ctrlr->quirks = nvme_get_quirks(&pci_id);
	}

	return 0;
}

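/*
 * Note on min_page_size above: CAP.MPSMIN is an exponent relative to 4 KiB,
 * so the minimum memory page size is 2^(12 + MPSMIN) bytes.  For example, a
 * controller reporting MPSMIN == 0 has a 4096-byte minimum page size.
 */
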
void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	while (!TAILQ_EMPTY(&ctrlr->active_io_qpairs)) {
		struct spdk_nvme_qpair *qpair = TAILQ_FIRST(&ctrlr->active_io_qpairs);

		spdk_nvme_ctrlr_free_io_qpair(qpair);
		nvme_qpair_destroy(qpair);
	}

	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_destruct_namespaces(ctrlr);

	spdk_bit_array_free(&ctrlr->free_io_qids);

	pthread_mutex_destroy(&ctrlr->ctrlr_lock);

	ctrlr->transport->ctrlr_destruct(ctrlr);
}

int
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
				struct nvme_request *req)
{
	return nvme_qpair_submit_request(ctrlr->adminq, req);
}

static void
nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
{
	/* Do nothing */
}

/*
 * Check if we need to send a Keep Alive command.
 * Caller must hold ctrlr->ctrlr_lock.
 */
static void
nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
{
	uint64_t now;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	now = spdk_get_ticks();
	if (now < ctrlr->next_keep_alive_tick) {
		return;
	}

	req = nvme_allocate_request_null(nvme_keep_alive_completion, NULL);
	if (req == NULL) {
		return;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	if (rc != 0) {
		SPDK_TRACELOG(SPDK_TRACE_NVME, "Submitting Keep Alive failed\n");
	}

	ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
}

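/*
 * Keep Alive commands are sent from spdk_nvme_ctrlr_process_admin_completions()
 * below, so an application that enables keep alive must call that function
 * often enough (comfortably more than once per keep alive interval) for the
 * timer to be serviced.
 */
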
int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	int32_t num_completions;

	pthread_mutex_lock(&ctrlr->ctrlr_lock);
	if (ctrlr->keep_alive_interval_ticks) {
		nvme_ctrlr_keep_alive(ctrlr);
	}
	num_completions = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	pthread_mutex_unlock(&ctrlr->ctrlr_lock);

	return num_completions;
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->cap;
}

union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	if (nvme_ctrlr_get_vs(ctrlr, &vs)) {
		vs.raw = 0;
	}
	return vs;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t ns_id)
{
	if (ns_id < 1 || ns_id > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[ns_id - 1];
}

void
spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
				      spdk_nvme_aer_cb aer_cb_fn,
				      void *aer_cb_arg)
{
	ctrlr->aer_cb_fn = aer_cb_fn;
	ctrlr->aer_cb_arg = aer_cb_arg;
}

bool
spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
{
	/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
	SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
	return ctrlr->log_page_supported[log_page];
}

bool
spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
{
	/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
	SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
	return ctrlr->feature_supported[feature_code];
}

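/*
 * Usage sketch (illustrative): callers can check support before issuing a
 * Get Log Page command for an optional page.
 *
 *	if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_LOG_COMMAND_EFFECTS_LOG)) {
 *		... issue spdk_nvme_ctrlr_cmd_get_log_page() for that page ...
 *	}
 */
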
int
spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			  struct spdk_nvme_ctrlr_list *payload)
{
	struct nvme_completion_poll_status status;
	int res;

	status.done = false;
	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
				       nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
		return -ENXIO;
	}

	return spdk_nvme_ctrlr_reset(ctrlr);
}

int
spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			  struct spdk_nvme_ctrlr_list *payload)
{
	struct nvme_completion_poll_status status;
	int res;

	status.done = false;
	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
				       nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
		return -ENXIO;
	}

	return spdk_nvme_ctrlr_reset(ctrlr);
}

uint32_t
spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
{
	struct nvme_completion_poll_status status;
	int res;

	status.done = false;
	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
	if (res) {
		return 0;
	}
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
		return 0;
	}

	res = spdk_nvme_ctrlr_reset(ctrlr);
	if (res) {
		return 0;
	}

	/* Return the namespace ID that was created */
	return status.cpl.cdw0;
}

int
spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	struct nvme_completion_poll_status status;
	int res;

	status.done = false;
	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
		return -ENXIO;
	}

	return spdk_nvme_ctrlr_reset(ctrlr);
}

int
spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		       struct spdk_nvme_format *format)
{
	struct nvme_completion_poll_status status;
	int res;

	status.done = false;
	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
				    &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
		return -ENXIO;
	}

	return spdk_nvme_ctrlr_reset(ctrlr);
}

int
spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
				int slot)
{
	struct spdk_nvme_fw_commit fw_commit;
	struct nvme_completion_poll_status status;
	int res;
	unsigned int size_remaining;
	unsigned int offset;
	unsigned int transfer;
	void *p;

	if (size % 4) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_update_firmware invalid size!\n");
		return -1;
	}

	/* Firmware download */
	size_remaining = size;
	offset = 0;
	p = payload;

	while (size_remaining > 0) {
		transfer = nvme_min(size_remaining, ctrlr->min_page_size);
		status.done = false;

		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
						       nvme_completion_poll_cb,
						       &status);
		if (res) {
			return res;
		}

		while (status.done == false) {
			pthread_mutex_lock(&ctrlr->ctrlr_lock);
			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
			pthread_mutex_unlock(&ctrlr->ctrlr_lock);
		}
		if (spdk_nvme_cpl_is_error(&status.cpl)) {
			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
			return -ENXIO;
		}
		p += transfer;
		offset += transfer;
		size_remaining -= transfer;
	}

	/* Firmware commit */
	memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
	fw_commit.fs = slot;
	fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_IMG;

	status.done = false;

	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
				       &status);
	if (res) {
		return res;
	}

	while (status.done == false) {
		pthread_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
		return -ENXIO;
	}

	return spdk_nvme_ctrlr_reset(ctrlr);
}
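
/*
 * Worked example of the download loop above (assumed values): with
 * min_page_size == 4096 and a 12288-byte firmware image, the image is sent
 * in three Firmware Image Download commands of 4096 bytes each, at offsets
 * 0, 4096, and 8192, followed by a single Firmware Commit.
 */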