Add a struct nvme_request object which contains all of the parameters passed
in from an NVMe consumer.

This allows NVMe command buffers to be built mostly without holding the qpair
lock, and it also allows for future queueing of nvme_request objects in cases
where the submission queue is full and no nvme_tracker objects are available.

Sponsored by:	Intel
parent f2b19f67ae
commit ad697276ce
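In short, consumers now build their command in a separately allocated
nvme_request and only then attach it to a tracker. The following is a
condensed sketch of that pattern, pieced together from the hunks below; the
wrapper name example_identify_controller is invented for illustration, and
error handling beyond the driver's own KASSERT is omitted.

static void
example_identify_controller(struct nvme_controller *ctrlr, void *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct nvme_tracker *tr;
    struct nvme_command *cmd;
    int err;

    /* Capture the consumer's parameters; no qpair lock is needed here. */
    req = nvme_allocate_request(payload,
        sizeof(struct nvme_controller_data), cb_fn, cb_arg);

    /* The tracker now only binds the request to a qpair slot. */
    tr = nvme_allocate_tracker(ctrlr, TRUE, req);

    /* The command is built in the request, not in the tracker. */
    cmd = &req->cmd;
    cmd->opc = NVME_OPC_IDENTIFY;
    cmd->cdw10 = 1;

    /* nvme_payload_map() fills in the PRP entries once the DMA load completes. */
    err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
        req->payload_size, nvme_payload_map, tr, 0);
    KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
}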
@@ -32,6 +32,8 @@ __FBSDID("$FreeBSD$");
 #include <sys/conf.h>
 #include <sys/module.h>
 
+#include <vm/uma.h>
+
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 
@@ -44,6 +46,8 @@ struct nvme_consumer {
 
 struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
 
+uma_zone_t nvme_request_zone;
+
 MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");
 
 static int nvme_probe(device_t);
@@ -109,6 +113,23 @@ nvme_probe (device_t device)
     return (ENXIO);
 }
 
+static void
+nvme_init(void)
+{
+    nvme_request_zone = uma_zcreate("nvme_request",
+        sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);
+}
+
+SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);
+
+static void
+nvme_uninit(void)
+{
+    uma_zdestroy(nvme_request_zone);
+}
+
+SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);
+
 static void
 nvme_load(void)
 {
@@ -224,13 +245,13 @@ nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
      * we can safely just transfer each segment to its
      * associated PRP entry.
      */
-    tr->cmd.prp1 = seg[0].ds_addr;
+    tr->req->cmd.prp1 = seg[0].ds_addr;
 
     if (nseg == 2) {
-        tr->cmd.prp2 = seg[1].ds_addr;
+        tr->req->cmd.prp2 = seg[1].ds_addr;
     } else if (nseg > 2) {
         cur_nseg = 1;
-        tr->cmd.prp2 = (uint64_t)tr->prp_bus_addr;
+        tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr;
         while (cur_nseg < nseg) {
             tr->prp[cur_nseg-1] =
                 (uint64_t)seg[cur_nseg].ds_addr;
@@ -243,7 +264,7 @@ nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
 
 struct nvme_tracker *
 nvme_allocate_tracker(struct nvme_controller *ctrlr, boolean_t is_admin,
-    nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t payload_size, void *payload)
+    struct nvme_request *req)
 {
     struct nvme_tracker *tr;
     struct nvme_qpair *qpair;
@@ -262,12 +283,8 @@ nvme_allocate_tracker(struct nvme_controller *ctrlr, boolean_t is_admin,
     if (tr == NULL)
         return (NULL);
 
-    memset(&tr->cmd, 0, sizeof(tr->cmd));
-
     tr->qpair = qpair;
-    tr->cb_fn = cb_fn;
-    tr->cb_arg = cb_arg;
-    tr->payload_size = payload_size;
+    tr->req = req;
 
     return (tr);
 }
@@ -33,14 +33,17 @@ void
 nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
-        sizeof(struct nvme_controller_data), payload);
+    req = nvme_allocate_request(payload,
+        sizeof(struct nvme_controller_data), cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_IDENTIFY;
 
     /*
@@ -50,7 +53,7 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
     cmd->cdw10 = 1;
 
     err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-        tr->payload_size, nvme_payload_map, tr, 0);
+        req->payload_size, nvme_payload_map, tr, 0);
 
     KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
 }
@@ -59,14 +62,17 @@ void
 nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
     void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
-        sizeof(struct nvme_namespace_data), payload);
+    req = nvme_allocate_request(payload,
+        sizeof(struct nvme_namespace_data), cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_IDENTIFY;
 
     /*
@@ -75,7 +81,7 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
     cmd->nsid = nsid;
 
     err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-        tr->payload_size, nvme_payload_map, tr, 0);
+        req->payload_size, nvme_payload_map, tr, 0);
 
     KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
 }
@@ -85,12 +91,15 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
     struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
     void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_CREATE_IO_CQ;
 
     /*
@@ -109,12 +118,15 @@ void
 nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
     struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_CREATE_IO_SQ;
 
     /*
@@ -133,12 +145,15 @@ void
 nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
     struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_DELETE_IO_CQ;
 
     /*
@@ -154,12 +169,15 @@ void
 nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
     struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_DELETE_IO_SQ;
 
     /*
@@ -176,14 +194,16 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
     uint32_t cdw11, void *payload, uint32_t payload_size,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
-        payload_size, payload);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_SET_FEATURES;
     cmd->cdw10 = feature;
     cmd->cdw11 = cdw11;
@@ -202,14 +222,16 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
     uint32_t cdw11, void *payload, uint32_t payload_size,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
-        payload_size, payload);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_GET_FEATURES;
     cmd->cdw10 = feature;
     cmd->cdw11 = cdw11;
@@ -276,12 +298,15 @@ void
 nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_ASYNC_EVENT_REQUEST;
 
     nvme_qpair_submit_cmd(tr->qpair, tr);
@@ -292,14 +317,16 @@ nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
     uint32_t nsid, struct nvme_health_information_page *payload,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
-        sizeof(*payload), payload);
+    req = nvme_allocate_request(payload, sizeof(*payload), cb_fn, cb_arg);
 
-    cmd = &tr->cmd;
+    tr = nvme_allocate_tracker(ctrlr, TRUE, req);
+
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_GET_LOG_PAGE;
     cmd->nsid = nsid;
     cmd->cdw10 = ((sizeof(*payload)/sizeof(uint32_t)) - 1) << 16;
@@ -33,17 +33,19 @@ int
 nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
     uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg,
-        lba_count*512, payload);
+    req = nvme_allocate_request(payload, lba_count*512, cb_fn, cb_arg);
+
+    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
 
     if (tr == NULL)
         return (ENOMEM);
 
-    cmd = &tr->cmd;
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_READ;
     cmd->nsid = ns->id;
 
@@ -52,7 +54,7 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
     cmd->cdw12 = lba_count-1;
 
     err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-        tr->payload_size, nvme_payload_map, tr, 0);
+        req->payload_size, nvme_payload_map, tr, 0);
 
     KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
 
@@ -63,17 +65,19 @@ int
 nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
     uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg,
-        lba_count*512, payload);
+    req = nvme_allocate_request(payload, lba_count*512, cb_fn, cb_arg);
+
+    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
 
     if (tr == NULL)
         return (ENOMEM);
 
-    cmd = &tr->cmd;
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_WRITE;
     cmd->nsid = ns->id;
 
@@ -82,7 +86,7 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
     cmd->cdw12 = lba_count-1;
 
     err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-        tr->payload_size, nvme_payload_map, tr, 0);
+        req->payload_size, nvme_payload_map, tr, 0);
 
     KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
 
@@ -93,17 +97,20 @@ int
 nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
     uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err;
 
-    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg,
-        num_ranges * sizeof(struct nvme_dsm_range), payload);
+    req = nvme_allocate_request(payload,
+        num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);
+
+    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
 
     if (tr == NULL)
         return (ENOMEM);
 
-    cmd = &tr->cmd;
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
     cmd->nsid = ns->id;
 
@@ -112,7 +119,7 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
     cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;
 
     err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-        tr->payload_size, nvme_payload_map, tr, 0);
+        req->payload_size, nvme_payload_map, tr, 0);
 
     KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
 
@@ -122,15 +129,18 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
 int
 nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
 
-    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg, 0, NULL);
+    req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+
+    tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
 
     if (tr == NULL)
         return (ENOMEM);
 
-    cmd = &tr->cmd;
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_FLUSH;
     cmd->nsid = ns->id;
 
@@ -38,6 +38,8 @@
 #include <sys/systm.h>
 #include <sys/taskqueue.h>
 
+#include <vm/uma.h>
+
 #include <machine/bus.h>
 
 #include "nvme.h"
@@ -92,16 +94,25 @@ MALLOC_DECLARE(M_NVME);
 #define CACHE_LINE_SIZE (64)
 #endif
 
+extern uma_zone_t nvme_request_zone;
+
+struct nvme_request {
+
+    struct nvme_command         cmd;
+    void                        *payload;
+    uint32_t                    payload_size;
+    nvme_cb_fn_t                cb_fn;
+    void                        *cb_arg;
+    SLIST_ENTRY(nvme_request)   slist;
+};
+
 struct nvme_tracker {
 
     SLIST_ENTRY(nvme_tracker)   slist;
+    struct nvme_request         *req;
     struct nvme_qpair           *qpair;
-    struct nvme_command         cmd;
     struct callout              timer;
     bus_dmamap_t                payload_dma_map;
-    nvme_cb_fn_t                cb_fn;
-    void                        *cb_arg;
-    uint32_t                    payload_size;
     uint16_t                    cid;
 
     uint64_t                    prp[NVME_MAX_PRP_LIST_ENTRIES];
@@ -322,9 +333,7 @@ void nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
 
 struct nvme_tracker * nvme_allocate_tracker(struct nvme_controller *ctrlr,
     boolean_t is_admin,
-    nvme_cb_fn_t cb_fn, void *cb_arg,
-    uint32_t payload_size,
-    void *payload);
+    struct nvme_request *request);
 void nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg,
     int error);
 
@@ -364,4 +373,24 @@ nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
     *bus_addr = seg[0].ds_addr;
 }
 
+static __inline struct nvme_request *
+nvme_allocate_request(void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+    void *cb_arg)
+{
+    struct nvme_request *req;
+
+    req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
+    if (req == NULL)
+        return (NULL);
+
+    req->payload = payload;
+    req->payload_size = payload_size;
+    req->cb_fn = cb_fn;
+    req->cb_arg = cb_arg;
+
+    return (req);
+}
+
+#define nvme_free_request(req) uma_zfree(nvme_request_zone, req)
+
 #endif /* __NVME_PRIVATE_H__ */
@@ -119,6 +119,7 @@ void
 nvme_qpair_process_completions(struct nvme_qpair *qpair)
 {
     struct nvme_tracker *tr;
+    struct nvme_request *req;
     struct nvme_completion *cpl;
     boolean_t retry, error;
 
@@ -131,6 +132,8 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
         break;
 
     tr = qpair->act_tr[cpl->cid];
+    req = tr->req;
+
     KASSERT(tr,
         ("completion queue has entries but no active trackers\n"));
 
@@ -139,7 +142,7 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
 
     if (error) {
         nvme_dump_completion(cpl);
-        nvme_dump_command(&tr->cmd);
+        nvme_dump_command(&tr->req->cmd);
     }
 
     qpair->act_tr[cpl->cid] = NULL;
@@ -147,8 +150,8 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
     KASSERT(cpl->cid == tr->cmd.cid,
         ("cpl cid does not match cmd cid\n"));
 
-    if (tr->cb_fn && !retry)
-        tr->cb_fn(tr->cb_arg, cpl);
+    if (req->cb_fn && !retry)
+        req->cb_fn(req->cb_arg, cpl);
 
     qpair->sq_head = cpl->sqhd;
 
@@ -159,10 +162,11 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
         /* nvme_qpair_submit_cmd() will release the lock. */
         nvme_qpair_submit_cmd(qpair, tr);
     else {
-        if (tr->payload_size > 0)
+        if (req->payload_size > 0)
             bus_dmamap_unload(qpair->dma_tag,
                 tr->payload_dma_map);
 
+        nvme_free_request(req);
         SLIST_INSERT_HEAD(&qpair->free_tr, tr, slist);
 
         mtx_unlock(&qpair->lock);
@@ -373,8 +377,10 @@ nvme_timeout(void *arg)
 void
 nvme_qpair_submit_cmd(struct nvme_qpair *qpair, struct nvme_tracker *tr)
 {
+    struct nvme_request *req;
 
-    tr->cmd.cid = tr->cid;
+    req = tr->req;
+    req->cmd.cid = tr->cid;
     qpair->act_tr[tr->cid] = tr;
 
     /*
@@ -393,7 +399,7 @@ nvme_qpair_submit_cmd(struct nvme_qpair *qpair, struct nvme_tracker *tr)
     callout_reset(&tr->timer, NVME_TIMEOUT_IN_SEC * hz, nvme_timeout, tr);
 
     /* Copy the command from the tracker to the submission queue. */
-    memcpy(&qpair->cmd[qpair->sq_tail], &tr->cmd, sizeof(tr->cmd));
+    memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));
 
     if (++qpair->sq_tail == qpair->num_entries)
         qpair->sq_tail = 0;
@@ -48,7 +48,8 @@ nvme_uio_done(void *arg, const struct nvme_completion *status)
 }
 
 static struct nvme_tracker *
-nvme_allocate_tracker_uio(struct nvme_controller *ctrlr, struct uio *uio)
+nvme_allocate_tracker_uio(struct nvme_controller *ctrlr, struct uio *uio,
+    struct nvme_request *req)
 {
     struct nvme_tracker *tr;
     struct nvme_qpair *qpair;
@@ -63,11 +64,8 @@ nvme_allocate_tracker_uio(struct nvme_controller *ctrlr, struct uio *uio)
     if (tr == NULL)
         return (NULL);
 
-    memset(&tr->cmd, 0, sizeof(tr->cmd));
-
     tr->qpair = qpair;
-    tr->cb_fn = nvme_uio_done;
-    tr->cb_arg = uio;
+    tr->req = req;
 
     return (tr);
 }
@@ -82,27 +80,30 @@ nvme_payload_map_uio(void *arg, bus_dma_segment_t *seg, int nseg,
 static int
 nvme_read_uio(struct nvme_namespace *ns, struct uio *uio)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err, i;
     uint64_t lba, iosize = 0;
 
-    tr = nvme_allocate_tracker_uio(ns->ctrlr, uio);
+    for (i = 0; i < uio->uio_iovcnt; i++) {
+        iosize += uio->uio_iov[i].iov_len;
+    }
+
+    req = nvme_allocate_request(NULL, iosize, nvme_uio_done, uio);
+
+    tr = nvme_allocate_tracker_uio(ns->ctrlr, uio, req);
 
     if (tr == NULL)
         return (ENOMEM);
 
-    cmd = &tr->cmd;
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_READ;
     cmd->nsid = ns->id;
     lba = uio->uio_offset / nvme_ns_get_sector_size(ns);
 
     *(uint64_t *)&cmd->cdw10 = lba;
 
-    for (i = 0; i < uio->uio_iovcnt; i++) {
-        iosize += uio->uio_iov[i].iov_len;
-    }
-
     cmd->cdw12 = (iosize / nvme_ns_get_sector_size(ns))-1;
 
     err = bus_dmamap_load_uio(tr->qpair->dma_tag, tr->payload_dma_map, uio,
@@ -116,27 +117,30 @@ nvme_read_uio(struct nvme_namespace *ns, struct uio *uio)
 static int
 nvme_write_uio(struct nvme_namespace *ns, struct uio *uio)
 {
+    struct nvme_request *req;
     struct nvme_tracker *tr;
     struct nvme_command *cmd;
     int err, i;
     uint64_t lba, iosize = 0;
 
-    tr = nvme_allocate_tracker_uio(ns->ctrlr, uio);
+    for (i = 0; i < uio->uio_iovcnt; i++) {
+        iosize += uio->uio_iov[i].iov_len;
+    }
+
+    req = nvme_allocate_request(NULL, iosize, nvme_uio_done, uio);
+
+    tr = nvme_allocate_tracker_uio(ns->ctrlr, uio, req);
 
     if (tr == NULL)
         return (ENOMEM);
 
-    cmd = &tr->cmd;
+    cmd = &req->cmd;
     cmd->opc = NVME_OPC_WRITE;
     cmd->nsid = ns->id;
     lba = uio->uio_offset / nvme_ns_get_sector_size(ns);
 
     *(uint64_t *)&cmd->cdw10 = lba;
 
-    for (i = 0; i < uio->uio_iovcnt; i++) {
-        iosize += uio->uio_iov[i].iov_len;
-    }
-
     cmd->cdw12 = (iosize / nvme_ns_get_sector_size(ns))-1;
 
     err = bus_dmamap_load_uio(tr->qpair->dma_tag, tr->payload_dma_map, uio,