nvme: combine various payload types into a struct
This cleans up the I/O splitting code somewhat. It also moves the SGL payload
function pointers up into the hot cache section of struct nvme_request without
pushing the other important members past the cacheline boundary (because
payload is now a union).

Change-Id: I14a5c24f579d57bb84d845147d03aa53bb4bb209
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
parent 82db40dbd5
commit 407a57165d
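For reference, here is a condensed, compilable sketch of the consolidated payload descriptor this commit introduces, drawn from the nvme_internal.h hunk further down. The two callback typedefs are not part of the diff; their signatures are inferred from the SGL call sites in the qpair hunk and should be treated as an approximation of the existing SPDK types, not a quotation of the headers.

/* Sketch (not part of the commit): the consolidated payload descriptor.
 * nvme_req_reset_sgl_fn_t / nvme_req_next_sge_fn_t are existing SPDK
 * typedefs; the signatures below are inferred from the call sites in
 * _nvme_qpair_build_sgl_request and may not match the headers exactly. */
#include <stdint.h>
#include <stddef.h>

typedef void (*nvme_req_reset_sgl_fn_t)(void *cb_arg, uint32_t offset);
typedef int (*nvme_req_next_sge_fn_t)(void *cb_arg, uint64_t *phys_addr, uint32_t *length);

enum nvme_payload_type {
	NVME_PAYLOAD_TYPE_INVALID = 0,
	NVME_PAYLOAD_TYPE_CONTIG,	/* u.contig is valid */
	NVME_PAYLOAD_TYPE_SGL,		/* u.sgl is valid */
};

struct __attribute__((packed)) nvme_payload {
	union {
		void *contig;		/* one physically contiguous buffer */
		struct {
			nvme_req_reset_sgl_fn_t reset_sgl_fn;
			nvme_req_next_sge_fn_t next_sge_fn;
		} sgl;			/* callbacks for scattered payloads */
	} u;
	uint8_t type;			/* enum nvme_payload_type */
};

/* How nvme_allocate_request_contig() (added by this commit) fills one in: */
static struct nvme_payload make_contig_payload(void *buffer)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	return payload;
}

Because the contiguous pointer and the two SGL function pointers share a union, carrying the SGL callbacks in the hot section of struct nvme_request costs no extra space in the common contiguous case.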
@@ -122,7 +122,7 @@ nvme_request_size(void)
 }
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
 		      nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req = NULL;
@@ -145,15 +145,30 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	req->cb_fn = cb_fn;
 	req->cb_arg = cb_arg;
 	req->timeout = true;
-	req->sgl_offset = 0;
 	req->parent = NULL;
 
-	req->u.payload = payload;
+	req->payload = *payload;
 	req->payload_size = payload_size;
 
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 void
 nvme_free_request(struct nvme_request *req)
 {
@@ -608,7 +608,7 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 
 	aer->ctrlr = ctrlr;
-	req = nvme_allocate_request(NULL, 0, nvme_ctrlr_async_event_cb, aer);
+	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
 	aer->req = req;
 	if (req == NULL) {
 		return -1;
@@ -41,7 +41,7 @@ nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
 {
 	struct nvme_request *req;
 
-	req = nvme_allocate_request(buf, len, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
 
 	if (req == NULL) {
 		return ENOMEM;
@@ -62,7 +62,7 @@ nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(buf, len, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -83,9 +83,9 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_controller_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_controller_data),
+					   cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_IDENTIFY;
@@ -106,9 +106,9 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_namespace_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_namespace_data),
+					   cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_IDENTIFY;
@@ -129,7 +129,7 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_CREATE_IO_CQ;
@@ -156,7 +156,7 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_CREATE_IO_SQ;
@@ -182,7 +182,7 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
 	struct nvme_command *cmd;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -209,7 +209,7 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
 	struct nvme_command *cmd;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -259,7 +259,7 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
 	struct nvme_command *cmd;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(payload, payload_size, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload, payload_size, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -284,7 +284,7 @@ nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_ABORT;
@@ -105,12 +105,46 @@
  */
 #define DEFAULT_MAX_IO_QUEUES (1024)
 
+enum nvme_payload_type {
+	NVME_PAYLOAD_TYPE_INVALID = 0,
+
+	/** nvme_request::u.payload.contig_buffer is valid for this request */
+	NVME_PAYLOAD_TYPE_CONTIG,
+
+	/** nvme_request::u.sgl is valid for this request */
+	NVME_PAYLOAD_TYPE_SGL,
+};
+
+/**
+ * Descriptor for a request data payload.
+ *
+ * This struct is arranged so that it fits nicely in struct nvme_request.
+ */
+struct __attribute__((packed)) nvme_payload {
+	union {
+		/** Virtual memory address of a single physically contiguous buffer */
+		void *contig;
+
+		/**
+		 * Functions for retrieving physical addresses for scattered payloads.
+		 */
+		struct {
+			nvme_req_reset_sgl_fn_t reset_sgl_fn;
+			nvme_req_next_sge_fn_t next_sge_fn;
+		} sgl;
+	} u;
+
+	/** \ref nvme_payload_type */
+	uint8_t type;
+};
+
 struct nvme_request {
 	struct nvme_command cmd;
 
-	union {
-		void *payload;
-	} u;
+	/**
+	 * Data payload for this request's command.
+	 */
+	struct nvme_payload payload;
 
 	uint8_t timeout;
 	uint8_t retries;
@@ -121,6 +155,13 @@ struct nvme_request {
 	 */
 	uint8_t num_children;
 	uint32_t payload_size;
 
+	/**
+	 * Offset in bytes from the beginning of payload for this request.
+	 * This is used for I/O commands that are split into multiple requests.
+	 */
+	uint32_t payload_offset;
+
 	nvme_cb_fn_t cb_fn;
 	void *cb_arg;
 	STAILQ_ENTRY(nvme_request) stailq;
@@ -159,13 +200,6 @@ struct nvme_request {
 	 * status once all child requests are completed.
 	 */
 	struct nvme_completion parent_status;
-
-	/**
-	 * Functions for retrieving physical addresses for scattered payloads.
-	 */
-	nvme_req_reset_sgl_fn_t reset_sgl_fn;
-	nvme_req_next_sge_fn_t next_sge_fn;
-	uint32_t sgl_offset;
 };
 
 struct nvme_completion_poll_status {
@@ -397,9 +431,11 @@ int nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
 		      struct nvme_controller *ctrlr);
 void nvme_ns_destruct(struct nvme_namespace *ns);
 
-struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
-		      nvme_cb_fn_t cb_fn, void *cb_arg);
+struct nvme_request *nvme_allocate_request(const struct nvme_payload *payload,
+					   uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg);
+struct nvme_request *nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg);
+struct nvme_request *nvme_allocate_request_contig(void *buffer, uint32_t payload_size,
+						  nvme_cb_fn_t cb_fn, void *cb_arg);
 void nvme_free_request(struct nvme_request *req);
 
 #endif /* __NVME_INTERNAL_H__ */
@@ -38,12 +38,10 @@
  *
  */
 
-static struct nvme_request *
-_nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
-		uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
-		uint32_t opc, uint32_t io_flags,
-		nvme_req_reset_sgl_fn_t reset_sgl_fn,
-		nvme_req_next_sge_fn_t next_sge_fn);
+static struct nvme_request *_nvme_ns_cmd_rw(struct nvme_namespace *ns,
+		const struct nvme_payload *payload, uint64_t lba,
+		uint32_t lba_count, nvme_cb_fn_t cb_fn,
+		void *cb_arg, uint32_t opc, uint32_t io_flags);
 
 static void
 nvme_cb_complete_child(void *child_arg, const struct nvme_completion *cpl)
@@ -89,13 +87,12 @@ nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
 }
 
 static struct nvme_request *
-_nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
+_nvme_ns_cmd_split_request(struct nvme_namespace *ns,
+			   const struct nvme_payload *payload,
 			   uint64_t lba, uint32_t lba_count,
 			   nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t opc,
 			   uint32_t io_flags, struct nvme_request *req,
-			   uint32_t sectors_per_max_io, uint32_t sector_mask,
-			   nvme_req_reset_sgl_fn_t reset_sgl_fn,
-			   nvme_req_next_sge_fn_t next_sge_fn)
+			   uint32_t sectors_per_max_io, uint32_t sector_mask)
 {
 	uint32_t sector_size = ns->sector_size;
 	uint32_t remaining_lba_count = lba_count;
@@ -107,30 +104,25 @@ _nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
 		lba_count = nvme_min(remaining_lba_count, lba_count);
 
 		child = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn,
-					cb_arg, opc, io_flags, reset_sgl_fn, next_sge_fn);
+					cb_arg, opc, io_flags);
 		if (child == NULL) {
 			nvme_free_request(req);
 			return NULL;
 		}
+		child->payload_offset = offset;
 		nvme_request_add_child(req, child);
 		remaining_lba_count -= lba_count;
 		lba += lba_count;
-		if (req->u.payload == NULL) {
-			child->sgl_offset = offset;
-			offset += lba_count * ns->sector_size;
-		} else
-			payload = (void *)((uintptr_t)payload + (lba_count * sector_size));
+		offset += lba_count * sector_size;
 	}
 
 	return req;
 }
 
 static struct nvme_request *
-_nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
-		uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
-		uint32_t opc, uint32_t io_flags,
-		nvme_req_reset_sgl_fn_t reset_sgl_fn,
-		nvme_req_next_sge_fn_t next_sge_fn)
+_nvme_ns_cmd_rw(struct nvme_namespace *ns, const struct nvme_payload *payload,
+		uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t opc,
+		uint32_t io_flags)
 {
 	struct nvme_request *req;
 	struct nvme_command *cmd;
@@ -153,9 +145,6 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 		return NULL;
 	}
 
-	req->reset_sgl_fn = reset_sgl_fn;
-	req->next_sge_fn = next_sge_fn;
-
 	/*
 	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
 	 * If this controller defines a stripe boundary and this I/O spans a stripe
@@ -166,12 +155,10 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
 
 		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
-						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
-						  reset_sgl_fn, next_sge_fn);
+						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1);
 	} else if (lba_count > sectors_per_max_io) {
 		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
-						  io_flags, req, sectors_per_max_io, 0,
-						  reset_sgl_fn, next_sge_fn);
+						  io_flags, req, sectors_per_max_io, 0);
 	} else {
 		cmd = &req->cmd;
 		cmd->opc = opc;
@@ -188,14 +175,17 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 }
 
 int
-nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
+nvme_ns_cmd_read(struct nvme_namespace *ns, void *buffer, uint64_t lba,
 		 uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
 		 uint32_t io_flags)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
 
-	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags,
-			      NULL, NULL);
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -211,9 +201,13 @@ nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
 		  nvme_req_next_sge_fn_t next_sge_fn)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
 
-	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags,
-			      reset_sgl_fn, next_sge_fn);
+	payload.type = NVME_PAYLOAD_TYPE_SGL;
+	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
+	payload.u.sgl.next_sge_fn = next_sge_fn;
+
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
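To make the readv/writev path above concrete, here is a hedged sketch of the kind of reset_sgl/next_sge callbacks a caller might supply. The element type and iterator are hypothetical; only the callback shapes and the offset/address/length contract follow from the hunks above and from _nvme_qpair_build_sgl_request further down, which forwards the caller's cb_arg and the request's payload_offset to these callbacks. For simplicity the sketch assumes split offsets land on element boundaries.

#include <stdint.h>

/* Hypothetical caller-side scatter list. */
struct example_sge {
	uint64_t phys_addr;
	uint32_t length;
};

struct example_sgl {
	struct example_sge *elems;
	uint32_t nelems;
	uint32_t current;	/* index the next next_sge call will return */
};

/* Reposition the iterator at the element containing byte 'offset';
 * split child requests pass a non-zero offset (req->payload_offset). */
static void example_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct example_sgl *sgl = cb_arg;
	uint32_t i = 0;

	while (i < sgl->nelems && offset >= sgl->elems[i].length) {
		offset -= sgl->elems[i].length;
		i++;
	}
	sgl->current = i;
}

/* Return the physical address and length of the next element,
 * or nonzero when the list is exhausted. */
static int example_next_sge(void *cb_arg, uint64_t *phys_addr, uint32_t *length)
{
	struct example_sgl *sgl = cb_arg;

	if (sgl->current >= sgl->nelems) {
		return -1;
	}
	*phys_addr = sgl->elems[sgl->current].phys_addr;
	*length = sgl->elems[sgl->current].length;
	sgl->current++;
	return 0;
}

These would be passed as reset_sgl_fn/next_sge_fn to nvme_ns_cmd_readv()/nvme_ns_cmd_writev(), with a pointer to the example_sgl as cb_arg; per the qpair hunk below, that same cb_arg is what the SGL callbacks and the completion callback both receive.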
@@ -223,14 +217,17 @@ nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
 }
 
 int
-nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
+nvme_ns_cmd_write(struct nvme_namespace *ns, void *buffer, uint64_t lba,
 		  uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
 		  uint32_t io_flags)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
 
-	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags,
-			      NULL, NULL);
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -246,9 +243,13 @@ nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
 		   nvme_req_next_sge_fn_t next_sge_fn)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
 
-	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags,
-			      reset_sgl_fn, next_sge_fn);
+	payload.type = NVME_PAYLOAD_TYPE_SGL;
+	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
+	payload.u.sgl.next_sge_fn = next_sge_fn;
+
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -268,9 +269,9 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
 		return EINVAL;
 	}
 
-	req = nvme_allocate_request(payload,
-				    num_ranges * sizeof(struct nvme_dsm_range),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   num_ranges * sizeof(struct nvme_dsm_range),
+					   cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -294,7 +295,7 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -319,9 +320,9 @@ nvme_ns_cmd_reservation_register(struct nvme_namespace *ns,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_reservation_register_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_reservation_register_data),
+					   cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -353,7 +354,8 @@ nvme_ns_cmd_reservation_release(struct nvme_namespace *ns,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload, sizeof(struct nvme_reservation_key_data), cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload, sizeof(struct nvme_reservation_key_data), cb_fn,
+					   cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -385,9 +387,9 @@ nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_reservation_acquire_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_reservation_acquire_data),
+					   cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -685,16 +685,17 @@ _nvme_qpair_build_sgl_request(struct nvme_qpair *qpair, struct nvme_request *req
 	 */
 
 	parent = req->parent ? req->parent : req;
-	nvme_assert(req->reset_sgl_fn != NULL, ("sgl reset callback required\n"));
-	req->reset_sgl_fn(parent->cb_arg, req->sgl_offset);
+	nvme_assert(req->payload.type == NVME_PAYLOAD_TYPE_SGL, ("sgl payload type required\n"));
+	nvme_assert(req->payload.u.sgl.reset_sgl_fn != NULL, ("sgl reset callback required\n"));
+	req->payload.u.sgl.reset_sgl_fn(parent->cb_arg, req->payload_offset);
 
 	remaining_transfer_len = req->payload_size;
 	total_nseg = 0;
 	last_nseg = 0;
 
 	while (remaining_transfer_len > 0) {
-		nvme_assert(req->next_sge_fn != NULL, ("sgl callback required\n"));
-		rc = req->next_sge_fn(parent->cb_arg, &phys_addr, &length);
+		nvme_assert(req->payload.u.sgl.next_sge_fn != NULL, ("sgl callback required\n"));
+		rc = req->payload.u.sgl.next_sge_fn(parent->cb_arg, &phys_addr, &length);
 		if (rc)
 			return -1;
 
@@ -803,12 +804,15 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
 	tr->req = req;
 	req->cmd.cid = tr->cid;
 
-	if (req->u.payload) {
+	if (req->payload_size == 0) {
+		/* Null payload - leave PRP fields zeroed */
+	} else if (req->payload.type == NVME_PAYLOAD_TYPE_CONTIG) {
 		/*
 		 * Build PRP list describing payload buffer.
 		 */
-		phys_addr = nvme_vtophys(req->u.payload);
+		void *payload = req->payload.u.contig + req->payload_offset;
+
+		phys_addr = nvme_vtophys(payload);
 		if (phys_addr == NVME_VTOPHYS_ERROR) {
 			_nvme_fail_request_bad_vtophys(qpair, tr);
 			return;
@@ -823,13 +827,13 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
 		tr->req->cmd.psdt = NVME_PSDT_PRP;
 		tr->req->cmd.dptr.prp.prp1 = phys_addr;
 		if (nseg == 2) {
-			seg_addr = req->u.payload + PAGE_SIZE - unaligned;
+			seg_addr = payload + PAGE_SIZE - unaligned;
 			tr->req->cmd.dptr.prp.prp2 = nvme_vtophys(seg_addr);
 		} else if (nseg > 2) {
 			cur_nseg = 1;
 			tr->req->cmd.dptr.prp.prp2 = (uint64_t)tr->prp_bus_addr;
 			while (cur_nseg < nseg) {
-				seg_addr = req->u.payload + cur_nseg * PAGE_SIZE - unaligned;
+				seg_addr = payload + cur_nseg * PAGE_SIZE - unaligned;
 				phys_addr = nvme_vtophys(seg_addr);
 				if (phys_addr == NVME_VTOPHYS_ERROR) {
 					_nvme_fail_request_bad_vtophys(qpair, tr);
@@ -839,12 +843,16 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
 				cur_nseg++;
 			}
 		}
-	} else if (req->u.payload == NULL && req->payload_size != 0) {
+	} else if (req->payload.type == NVME_PAYLOAD_TYPE_SGL) {
 		rc = _nvme_qpair_build_sgl_request(qpair, req, tr);
 		if (rc < 0) {
 			_nvme_fail_request_bad_vtophys(qpair, tr);
 			return;
 		}
+	} else {
+		nvme_assert(0, ("invalid NVMe payload type %d\n", req->payload.type));
+		_nvme_fail_request_bad_vtophys(qpair, tr);
+		return;
 	}
 
 	nvme_qpair_submit_tracker(qpair, tr);
@@ -148,8 +148,8 @@ nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
 
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
-		      nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+		      void *cb_arg)
 {
 	struct nvme_request *req = NULL;
 	nvme_alloc_request(&req);
@@ -157,21 +157,35 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	if (req != NULL) {
 		memset(req, 0, offsetof(struct nvme_request, children));
 
-		if (payload == NULL || payload_size == 0) {
-			req->u.payload = NULL;
-			req->payload_size = 0;
-		} else {
-			req->u.payload = payload;
-			req->payload_size = payload_size;
-		}
+		req->payload = *payload;
+		req->payload_size = payload_size;
 
 		req->cb_fn = cb_fn;
 		req->cb_arg = cb_arg;
 		req->timeout = true;
 	}
 
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 static void
 test_nvme_ctrlr_fail(void)
 {
@@ -177,20 +177,15 @@ static void verify_intel_get_log_page_directory(struct nvme_request *req)
 }
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
-		      nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+		      void *cb_arg)
 {
 	struct nvme_request *req = &g_req;
 
 	memset(req, 0, sizeof(*req));
 
-	if (payload == NULL || payload_size == 0) {
-		req->u.payload = NULL;
-		req->payload_size = 0;
-	} else {
-		req->u.payload = payload;
-		req->payload_size = payload_size;
-	}
+	req->payload = *payload;
+	req->payload_size = payload_size;
 
 	req->cb_fn = cb_fn;
 	req->cb_arg = cb_arg;
@@ -199,6 +194,23 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 void
 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
 			     struct nvme_request *req)
@@ -56,8 +56,8 @@ uint64_t nvme_vtophys(void *buf)
 }
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
-		      nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+		      void *cb_arg)
 {
 	struct nvme_request *req = NULL;
 
@@ -79,20 +79,29 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	req->cb_fn = cb_fn;
 	req->cb_arg = cb_arg;
 	req->timeout = true;
-	nvme_assert((payload == NULL && payload_size == 0) ||
-		    (payload != NULL && payload_size != 0),
-		    ("Invalid argument combination of payload and payload_size\n"));
-	if (payload == NULL || payload_size == 0) {
-		req->u.payload = NULL;
-		req->payload_size = 0;
-	} else {
-		req->u.payload = payload;
-		req->payload_size = payload_size;
-	}
+	req->payload = *payload;
+	req->payload_size = payload_size;
 
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 void
 nvme_free_request(struct nvme_request *req)
 {
@@ -208,7 +217,7 @@ test3(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 
-	req = nvme_allocate_request(NULL, 0, expected_success_callback, NULL);
+	req = nvme_allocate_request_null(expected_success_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	CU_ASSERT(qpair.sq_tail == 0);
@@ -232,7 +241,7 @@ test4(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 
-	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
+	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	/* Force vtophys to return a failure. This should
@@ -265,7 +274,7 @@ test_ctrlr_failed(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 
-	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
+	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	/* Disable the queue and set the controller to failed.
@@ -311,14 +320,14 @@ static void test_nvme_qpair_fail(void)
 	tr_temp = nvme_malloc("nvme_tracker", sizeof(struct nvme_tracker),
 			      64, &phys_addr);
 	SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
-	tr_temp->req = nvme_allocate_request(NULL, 0, expected_failure_callback, NULL);
+	tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
 
 	LIST_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, list);
 	nvme_qpair_fail(&qpair);
 	CU_ASSERT_TRUE(LIST_EMPTY(&qpair.outstanding_tr));
 
-	req = nvme_allocate_request(NULL, 0, expected_failure_callback, NULL);
+	req = nvme_allocate_request_null(expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	STAILQ_INSERT_HEAD(&qpair.queued_req, req, stailq);
@@ -394,7 +403,7 @@ static void test_nvme_qpair_destroy(void)
 	tr_temp = nvme_malloc("nvme_tracker", sizeof(struct nvme_tracker),
 			      64, &phys_addr);
 	SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
-	tr_temp->req = nvme_allocate_request(NULL, 0, expected_failure_callback, NULL);
+	tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
 
 	tr_temp->req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;