NVMe: Add public API to get log pages.

1 Add supported log pages data structure.
2 Build up the supported log pages when the NVMe controller starts.
3 Provide a unified API for getting log pages.
4 Optimize the unit test suite based on the above modifications.

Change-Id: I03cdb93f5c94e6897510d7f19bc7d9f4e70f9222
Signed-off-by: Cunyin Chang <cunyin.chang@intel.com>
Cunyin Chang 2016-01-06 13:43:33 +08:00
parent b61b099030
commit 9945c00cf2
10 changed files with 404 additions and 74 deletions


@@ -107,6 +107,13 @@ const struct nvme_controller_data *nvme_ctrlr_get_data(struct nvme_controller *c
*/
uint32_t nvme_ctrlr_get_num_ns(struct nvme_controller *ctrlr);
/**
* \brief Determine whether the given log page is supported by the NVMe controller.
*
* This function is thread safe and can be called at any point after nvme_attach().
*/
bool nvme_ctrlr_is_log_page_supported(struct nvme_controller *ctrlr, int log_page);
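Purely as an illustration (not part of this diff), a caller might use this check to distinguish mandatory generic pages from optional vendor-specific ones; the probe_log_page_support helper below is hypothetical:

#include <stdbool.h>
#include <stdio.h>

#include "spdk/nvme.h"
#include "spdk/nvme_intel.h"

/* Hypothetical probe (illustration only): generic pages such as NVME_LOG_ERROR
 * are mandatory, while vendor-specific pages such as NVME_INTEL_LOG_SMART are
 * optional and depend on the attached controller. */
static void
probe_log_page_support(struct nvme_controller *ctrlr)
{
	printf("error log supported: %d\n",
	       nvme_ctrlr_is_log_page_supported(ctrlr, NVME_LOG_ERROR));
	printf("intel smart log supported: %d\n",
	       nvme_ctrlr_is_log_page_supported(ctrlr, NVME_INTEL_LOG_SMART));
}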
/**
* Signature for callback function invoked when a command is completed.
*
@@ -219,6 +226,25 @@ struct nvme_namespace;
*/
struct nvme_namespace *nvme_ctrlr_get_ns(struct nvme_controller *ctrlr, uint32_t ns_id);
/**
* \brief Get a specific log page from the NVMe controller.
*
* This function can be called at any point after nvme_attach().
* \param log_page - the log page identifier.
* \param nsid - the namespace identifier; NVME_GLOBAL_NAMESPACE_TAG can be
* used for log pages that are not specific to a single namespace.
* \param payload - pointer to the payload buffer.
* \param payload_size - size of the payload buffer in bytes.
* \param cb_fn - callback function invoked when the get log page command completes.
* \param cb_arg - argument passed to cb_fn.
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
*/
void nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
uint8_t log_page, uint32_t nsid,
void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg);
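A minimal caller-side sketch (not part of this diff) of the intended flow: check support, submit the request, then poll the admin queue for the completion. The on_log_page callback, the done flag, and the fetch_health_log helper are illustrative names, and the payload buffer is assumed to be DMA-safe memory as required by the driver.

#include <stdbool.h>

#include "spdk/nvme.h"

/* Hypothetical completion callback: flags completion for the polling loop. */
static void
on_log_page(void *cb_arg, const struct nvme_completion *cpl)
{
	*(bool *)cb_arg = true;
}

/* Hypothetical helper: fetch the SMART/health page for the whole controller.
 * payload is assumed to point at DMA-safe memory of at least sizeof(*payload). */
static int
fetch_health_log(struct nvme_controller *ctrlr,
		 struct nvme_health_information_page *payload)
{
	bool done = false;

	if (!nvme_ctrlr_is_log_page_supported(ctrlr, NVME_LOG_HEALTH_INFORMATION)) {
		return -1;
	}

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
				    NVME_GLOBAL_NAMESPACE_TAG, payload,
				    sizeof(*payload), on_log_page, &done);

	while (!done) {
		nvme_ctrlr_process_admin_completions(ctrlr);
	}

	return 0;
}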
/**
* \brief Get the identify namespace data as defined by the NVMe specification.
*


@@ -81,6 +81,20 @@ enum nvme_intel_smart_attribute_code {
NVME_INTEL_SMART_HOST_BYTES_WRITTEN = 0xF5,
};
struct nvme_intel_log_page_directory {
uint8_t version[2];
uint8_t reserved[384];
uint8_t read_latency_log_len;
uint8_t reserved2;
uint8_t write_latency_log_len;
uint8_t reserved3[5];
uint8_t temperature_statistics_log_len;
uint8_t reserved4[9];
uint8_t smart_log_len;
uint8_t reserved5[107];
};
SPDK_STATIC_ASSERT(sizeof(struct nvme_intel_log_page_directory) == 512, "Incorrect size");
struct nvme_intel_rw_latency_page {
uint16_t major_revison;
uint16_t minor_revison;


@@ -573,8 +573,9 @@ struct __attribute__((packed)) nvme_controller_data {
struct {
/* per namespace smart/health log page */
uint8_t ns_smart : 1;
uint8_t lpa_rsvd : 7;
/* command effects log page */
uint8_t celp : 1;
uint8_t lpa_rsvd : 6;
} lpa;
/** error log page entries */
@@ -876,8 +877,11 @@ enum nvme_log_page {
NVME_LOG_ERROR = 0x01,
NVME_LOG_HEALTH_INFORMATION = 0x02,
NVME_LOG_FIRMWARE_SLOT = 0x03,
/* 0x04-0x7F - reserved */
/* 0x80-0xBF - I/O command set specific */
NVME_LOG_CHANGED_NS_LIST = 0x04,
NVME_LOG_COMMAND_EFFECTS_LOG = 0x05,
/* 0x06-0x7F - reserved */
NVME_LOG_RESERVATION_NOTIFICATION = 0x80,
/* 0x81-0xBF - I/O command set specific */
/* 0xC0-0xFF - vendor specific */
};

include/spdk/pci_ids.h (new file, 39 additions)

@@ -0,0 +1,39 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PCI_IDS_H__
#define __PCI_IDS_H__
#define PCI_VENDOR_ID_INTEL 0x8086
#endif /* __PCI_IDS_H__ */


@@ -34,6 +34,8 @@
#ifndef __IOAT_PCI_H__
#define __IOAT_PCI_H__
#include "spdk/pci_ids.h"
#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
@@ -88,7 +90,5 @@
#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
#define PCI_VENDOR_ID_INTEL 0x8086
#endif /* __IOAT_PCI_H__ */


@@ -32,7 +32,7 @@
*/
#include "nvme_internal.h"
#include "spdk/nvme_intel.h"
/**
* \file
*
@@ -41,6 +41,84 @@
static int nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
struct nvme_async_event_request *aer);
static void
nvme_ctrlr_construct_intel_support_log_page_list(struct nvme_controller *ctrlr,
struct nvme_intel_log_page_directory *log_page_directory)
{
int i = 0;
if (ctrlr->cdata.vid != PCI_VENDOR_ID_INTEL || log_page_directory == NULL)
return;
ctrlr->supported_log_pages.vendor_specific_page_id[i] = NVME_INTEL_LOG_PAGE_DIRECTORY;
i++;
if (log_page_directory->read_latency_log_len) {
ctrlr->supported_log_pages.vendor_specific_page_id[i] = NVME_INTEL_LOG_READ_CMD_LATENCY;
i++;
}
if (log_page_directory->write_latency_log_len) {
ctrlr->supported_log_pages.vendor_specific_page_id[i] = NVME_INTEL_LOG_WRITE_CMD_LATENCY;
i++;
}
if (log_page_directory->temperature_statistics_log_len) {
ctrlr->supported_log_pages.vendor_specific_page_id[i] = NVME_INTEL_LOG_TEMPERATURE;
i++;
}
if (log_page_directory->smart_log_len) {
ctrlr->supported_log_pages.vendor_specific_page_id[i] = NVME_INTEL_LOG_SMART;
}
}
static int nvme_ctrlr_set_intel_support_log_pages(struct nvme_controller *ctrlr)
{
uint64_t phys_addr = 0;
struct nvme_completion_poll_status status;
struct nvme_intel_log_page_directory *log_page_directory;
log_page_directory = nvme_malloc("nvme_log_page_directory",
sizeof(struct nvme_intel_log_page_directory),
64, &phys_addr);
if (log_page_directory == NULL) {
nvme_printf(NULL, "could not allocate log_page_directory\n");
return ENXIO;
}
status.done = false;
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_INTEL_LOG_PAGE_DIRECTORY, NVME_GLOBAL_NAMESPACE_TAG,
log_page_directory, sizeof(struct nvme_intel_log_page_directory),
nvme_completion_poll_cb,
&status);
while (status.done == false) {
nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (nvme_completion_is_error(&status.cpl)) {
nvme_free(log_page_directory);
nvme_printf(ctrlr, "nvme_ctrlr_cmd_get_log_page failed!\n");
return ENXIO;
}
nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
nvme_free(log_page_directory);
return 0;
}
static void
nvme_ctrlr_set_supported_log_pages(struct nvme_controller *ctrlr)
{
memset(&ctrlr->supported_log_pages, 0, sizeof(struct nvme_supported_log_pages));
ctrlr->supported_log_pages.vendor_id = ctrlr->cdata.vid;
ctrlr->supported_log_pages.generic_page_id[0] = NVME_LOG_ERROR;
ctrlr->supported_log_pages.generic_page_id[1] = NVME_LOG_HEALTH_INFORMATION;
ctrlr->supported_log_pages.generic_page_id[2] = NVME_LOG_FIRMWARE_SLOT;
if (ctrlr->cdata.lpa.celp) {
ctrlr->supported_log_pages.generic_page_id[3] = NVME_LOG_COMMAND_EFFECTS_LOG;
}
if (ctrlr->supported_log_pages.vendor_id == PCI_VENDOR_ID_INTEL) {
nvme_ctrlr_set_intel_support_log_pages(ctrlr);
}
}
static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
@@ -620,6 +698,7 @@ nvme_ctrlr_start(struct nvme_controller *ctrlr)
return -1;
}
nvme_ctrlr_set_supported_log_pages(ctrlr);
return 0;
}
@@ -782,3 +861,34 @@ nvme_ctrlr_register_aer_callback(struct nvme_controller *ctrlr,
ctrlr->aer_cb_fn = aer_cb_fn;
ctrlr->aer_cb_arg = aer_cb_arg;
}
bool
nvme_ctrlr_is_log_page_supported(struct nvme_controller *ctrlr, int log_page)
{
unsigned int i = 0;
while (i < sizeof(ctrlr->supported_log_pages.generic_page_id)) {
if (log_page == ctrlr->supported_log_pages.generic_page_id[i]) {
return true;
}
i++;
}
i = 0;
while (i < sizeof(ctrlr->supported_log_pages.command_set_page_id)) {
if (log_page == ctrlr->supported_log_pages.command_set_page_id[i]) {
return true;
}
i++;
}
i = 0;
while (i < sizeof(ctrlr->supported_log_pages.vendor_specific_page_id)) {
if (log_page == ctrlr->supported_log_pages.vendor_specific_page_id[i]) {
return true;
}
i++;
}
return false;
}


@@ -241,6 +241,7 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
struct nvme_request *req;
struct nvme_command *cmd;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request(payload, payload_size, cb_fn, cb_arg);
cmd = &req->cmd;
@@ -250,47 +251,7 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
cmd->cdw10 |= log_page;
nvme_ctrlr_submit_admin_request(ctrlr, req);
}
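For reference, a sketch (not part of this diff) of the cdw10 layout this command builds and that the unit tests later in this commit verify, following the NVMe 1.x Get Log Page definition: the 0-based dword count (NUMD) in bits 31:16 and the log page identifier in the low byte. The log_page_cdw10 helper name is hypothetical.

#include <stdint.h>

/* Hypothetical illustration of the Get Log Page cdw10 encoding:
 * bits 31:16 = NUMD, the number of dwords to transfer minus one
 * bits  7:0  = the log page identifier */
static inline uint32_t
log_page_cdw10(uint8_t log_page, uint32_t payload_size)
{
	uint32_t numd = payload_size / sizeof(uint32_t) - 1;

	return (numd << 16) | log_page;
}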
void
nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
struct nvme_error_information_entry *payload, uint32_t num_entries,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
nvme_assert(num_entries > 0, ("%s called with num_entries==0\n", __func__));
/* Controller's error log page entries is 0-based. */
nvme_assert(num_entries <= (ctrlr->cdata.elpe + 1u),
("%s called with num_entries=%d but (elpe+1)=%d\n", __func__,
num_entries, ctrlr->cdata.elpe + 1));
if (num_entries > (ctrlr->cdata.elpe + 1u))
num_entries = ctrlr->cdata.elpe + 1u;
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,
cb_fn, cb_arg);
}
void
nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
uint32_t nsid, struct nvme_health_information_page *payload,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}
void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
cb_arg);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
}
void


@@ -52,6 +52,8 @@
#include "spdk/queue.h"
#include "spdk/barrier.h"
#include "spdk/mmio.h"
#include "spdk/pci_ids.h"
#include "spdk/nvme_intel.h"
#define NVME_MAX_PRP_LIST_ENTRIES (32)
@@ -230,6 +232,23 @@ struct nvme_namespace {
uint16_t flags;
};
/** \brief supported log pages. */
struct nvme_supported_log_pages {
uint32_t vendor_id;
/**
* List of supported generic log page IDs, terminated with 0.
*/
uint8_t generic_page_id[128];
/**
* List of supported command set page IDs, terminated with 0.
*/
uint8_t command_set_page_id[64];
/**
* List of supported vendor specific page IDs, terminated with 0.
*/
uint8_t vendor_specific_page_id[64];
};
/*
* One of these per allocated PCI device.
*/
@@ -253,6 +272,9 @@ struct nvme_controller {
/* Cold data (not accessed in normal I/O path) is after this point. */
/** All the log pages supported */
struct nvme_supported_log_pages supported_log_pages;
/* Opaque handle to associated PCI device. */
void *devhandle;
@@ -342,31 +364,12 @@ void nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
uint8_t feature, uint32_t cdw11,
void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
uint8_t log_page, uint32_t nsid,
void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
void *payload,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
uint16_t nsid, void *payload,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
struct nvme_error_information_entry *payload,
uint32_t num_entries, /* 0 = max */
nvme_cb_fn_t cb_fn,
void *cb_arg);
void nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
uint32_t nsid,
struct nvme_health_information_page *payload,
nvme_cb_fn_t cb_fn,
void *cb_arg);
void nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
struct nvme_firmware_page *payload,
nvme_cb_fn_t cb_fn,
void *cb_arg);
void nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
struct nvme_qpair *io_que,
nvme_cb_fn_t cb_fn, void *cb_arg);


@@ -50,6 +50,12 @@ int nvme_qpair_construct(struct nvme_qpair *qpair, uint16_t id,
{
return 0;
}
void
nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
void *cb_arg)
{
}
void
nvme_qpair_fail(struct nvme_qpair *qpair)
@@ -175,6 +181,34 @@ test_nvme_ctrlr_fail(void)
CU_ASSERT(ctrlr.is_failed == true);
}
static void
test_nvme_ctrlr_construct_intel_support_log_page_list(void)
{
bool res;
struct nvme_controller ctrlr = {};
struct nvme_intel_log_page_directory payload = {};
/* set an invalid vendor ID */
ctrlr.cdata.vid = 0xFFFF;
memset(&payload, 0, sizeof(struct nvme_intel_log_page_directory));
nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, NVME_INTEL_LOG_TEMPERATURE);
CU_ASSERT(res == false);
ctrlr.cdata.vid = PCI_VENDOR_ID_INTEL;
payload.temperature_statistics_log_len = 1;
nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, NVME_INTEL_LOG_PAGE_DIRECTORY);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, NVME_INTEL_LOG_TEMPERATURE);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, NVME_INTEL_LOG_READ_CMD_LATENCY);
CU_ASSERT(res == false);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, NVME_INTEL_LOG_SMART);
CU_ASSERT(res == false);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
@@ -192,6 +226,8 @@ int main(int argc, char **argv)
if (
CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
) {
CU_cleanup_registry();
return CU_get_error();


@@ -118,6 +118,62 @@ static void verify_io_raw_cmd(struct nvme_request *req)
CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
}
static void verify_intel_smart_log_page(struct nvme_request *req)
{
uint32_t temp_cdw10;
CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE);
CU_ASSERT(req->cmd.nsid == health_log_nsid);
temp_cdw10 = ((sizeof(struct nvme_intel_smart_information_page) / sizeof(uint32_t) - 1) << 16) |
NVME_INTEL_LOG_SMART;
CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
}
static void verify_intel_temperature_log_page(struct nvme_request *req)
{
uint32_t temp_cdw10;
CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE);
temp_cdw10 = ((sizeof(struct nvme_intel_temperature_page) / sizeof(uint32_t) - 1) << 16) |
NVME_INTEL_LOG_TEMPERATURE;
CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
}
static void verify_intel_read_latency_log_page(struct nvme_request *req)
{
uint32_t temp_cdw10;
CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE);
temp_cdw10 = ((sizeof(struct nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
NVME_INTEL_LOG_READ_CMD_LATENCY;
CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
}
static void verify_intel_write_latency_log_page(struct nvme_request *req)
{
uint32_t temp_cdw10;
CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE);
temp_cdw10 = ((sizeof(struct nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
NVME_INTEL_LOG_WRITE_CMD_LATENCY;
CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
}
static void verify_intel_get_log_page_directory(struct nvme_request *req)
{
uint32_t temp_cdw10;
CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE);
temp_cdw10 = ((sizeof(struct nvme_intel_log_page_directory) / sizeof(uint32_t) - 1) << 16) |
NVME_INTEL_LOG_PAGE_DIRECTORY;
CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
}
struct nvme_request *
nvme_allocate_request(void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg)
@@ -158,7 +214,6 @@ nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr, struct nvme_reque
memset(req, 0, sizeof(*req));
}
static void
test_firmware_get_log_page(void)
{
@@ -167,7 +222,9 @@ test_firmware_get_log_page(void)
verify_fn = verify_firmware_log_page;
nvme_ctrlr_cmd_get_firmware_page(&ctrlr, &payload, NULL, NULL);
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_LOG_FIRMWARE_SLOT, NVME_GLOBAL_NAMESPACE_TAG,
&payload,
sizeof(payload), NULL, NULL);
}
static void
@@ -178,7 +235,8 @@ test_health_get_log_page(void)
verify_fn = verify_health_log_page;
nvme_ctrlr_cmd_get_health_information_page(&ctrlr, health_log_nsid, &payload, NULL, NULL);
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_LOG_HEALTH_INFORMATION, health_log_nsid, &payload,
sizeof(payload), NULL, NULL);
}
static void
@@ -193,7 +251,82 @@ test_error_get_log_page(void)
/* valid page */
error_num_entries = 1;
nvme_ctrlr_cmd_get_error_page(&ctrlr, &payload, error_num_entries, NULL, NULL);
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_LOG_ERROR, NVME_GLOBAL_NAMESPACE_TAG, &payload,
sizeof(payload), NULL, NULL);
}
static void test_intel_smart_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct nvme_intel_smart_information_page payload = {};
verify_fn = verify_intel_smart_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
sizeof(payload), NULL, NULL);
}
static void test_intel_temperature_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct nvme_intel_temperature_page payload = {};
verify_fn = verify_intel_temperature_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_INTEL_LOG_TEMPERATURE, NVME_GLOBAL_NAMESPACE_TAG,
&payload,
sizeof(payload), NULL, NULL);
}
static void test_intel_read_latency_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct nvme_intel_rw_latency_page payload = {};
verify_fn = verify_intel_read_latency_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_INTEL_LOG_READ_CMD_LATENCY,
NVME_GLOBAL_NAMESPACE_TAG,
&payload, sizeof(payload), NULL, NULL);
}
static void test_intel_write_latency_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct nvme_intel_rw_latency_page payload = {};
verify_fn = verify_intel_write_latency_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_INTEL_LOG_WRITE_CMD_LATENCY,
NVME_GLOBAL_NAMESPACE_TAG,
&payload, sizeof(payload), NULL, NULL);
}
static void test_intel_get_log_page_directory(void)
{
struct nvme_controller ctrlr = {};
struct nvme_intel_log_page_directory payload = {};
verify_fn = verify_intel_get_log_page_directory;
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_INTEL_LOG_PAGE_DIRECTORY, NVME_GLOBAL_NAMESPACE_TAG,
&payload, sizeof(payload), NULL, NULL);
}
static void test_generic_get_log_pages(void)
{
test_error_get_log_page();
test_health_get_log_page();
test_firmware_get_log_page();
}
static void test_intel_get_log_pages(void)
{
test_intel_get_log_page_directory();
test_intel_smart_get_log_page();
test_intel_temperature_get_log_page();
test_intel_read_latency_get_log_page();
test_intel_write_latency_get_log_page();
}
static void
@@ -238,6 +371,12 @@ test_io_raw_cmd(void)
nvme_ctrlr_cmd_io_raw(&ctrlr, &cmd, NULL, 1, NULL, NULL);
}
static void
test_get_log_pages(void)
{
test_generic_get_log_pages();
test_intel_get_log_pages();
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
@@ -254,9 +393,7 @@ int main(int argc, char **argv)
}
if (
CU_add_test(suite, "test ctrlr cmd get_firmware_page", test_firmware_get_log_page) == NULL
|| CU_add_test(suite, "test ctrlr cmd get_health_page", test_health_get_log_page) == NULL
|| CU_add_test(suite, "test ctrlr cmd get_error_page", test_error_get_log_page) == NULL
CU_add_test(suite, "test ctrlr cmd get_log_pages", test_get_log_pages) == NULL
|| CU_add_test(suite, "test ctrlr cmd set_feature", test_set_feature_cmd) == NULL
|| CU_add_test(suite, "test ctrlr cmd get_feature", test_get_feature_cmd) == NULL
|| CU_add_test(suite, "test ctrlr cmd abort_cmd", test_abort_cmd) == NULL