Add return codes to all functions used for submitting commands to I/O
queues.

Sponsored by:	Intel
Jim Harris 2012-10-18 00:32:07 +00:00
parent 8651c115b8
commit 9eb93f2976
7 changed files with 100 additions and 30 deletions
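
For orientation before the per-file diffs, here is a minimal caller-side sketch of how the new return values are meant to be consumed, modeled on the nvd(4) retry loop and the nvme_ns_bio_process() changes below. The helper name submit_write_with_retry() is hypothetical and the usual nvme(4) kernel headers are assumed; it illustrates the API change and is not code from this commit.

/*
 * Hypothetical consumer of the nvme(4) namespace API after this change.
 * nvme_ns_cmd_write() now returns an int (0 on success, ENOMEM when no
 * tracker could be allocated), so the caller can retry or propagate the
 * error instead of assuming the submission succeeded.
 */
static int
submit_write_with_retry(struct nvme_namespace *ns, void *payload,
    uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	int err;

	err = nvme_ns_cmd_write(ns, payload, lba, lba_count, cb_fn, cb_arg);
	/*
	 * Mirror the nvd(4) workaround from this commit: ENOMEM currently
	 * also covers a completely full submission queue, so back off
	 * briefly and retry rather than failing the request outright.
	 */
	while (err == ENOMEM) {
		pause("nvme enomem", 1);
		err = nvme_ns_cmd_write(ns, payload, lba, lba_count,
		    cb_fn, cb_arg);
	}
	return (err);
}

As the TODO in nvd.c notes, ENOMEM currently covers both malloc() failure and a full submission queue, which is why a brief pause-and-retry is the interim response.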

View File

@ -214,9 +214,21 @@ nvd_bioq_process(void *arg, int pending)
err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);
+ /*
+  * TODO: remove this loop and rely on GEOM's pacing once
+  * nvme(4) returns ENOMEM only for malloc() failures.
+  * Currently nvme(4) returns ENOMEM also for cases when
+  * the submission queue is completely full, and that case
+  * will be handled more elegantly in a future update.
+  */
+ while (err == ENOMEM) {
+     pause("nvd enomem", 1);
+     err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);
+ }
if (err) {
atomic_add_acq_int(&ndisk->cur_depth, -1);
- bp->bio_error = EIO;
+ bp->bio_error = err;
bp->bio_flags |= BIO_ERROR;
bp->bio_resid = bp->bio_bcount;
biodone(bp);

View File

@ -275,6 +275,9 @@ nvme_allocate_tracker(struct nvme_controller *ctrlr, boolean_t is_admin,
tr = nvme_qpair_allocate_tracker(qpair, alloc_prp_list);
+ if (tr == NULL)
+     return (NULL);
memset(&tr->cmd, 0, sizeof(tr->cmd));
tr->qpair = qpair;

View File

@ -704,16 +704,16 @@ enum nvme_namespace_flags {
};
/* NVM I/O functions */
- void nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
+ int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
void *cb_arg);
- void nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
+ int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
void *cb_arg);
- void nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
+ int nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
uint8_t num_ranges, nvme_cb_fn_t cb_fn,
void *cb_arg);
- void nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
+ int nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
void *cb_arg);
/* Registration functions */

View File

@ -160,7 +160,7 @@ nvme_ns_strategy(struct bio *bp)
err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);
if (err) {
- bp->bio_error = EIO;
+ bp->bio_error = err;
bp->bio_flags |= BIO_ERROR;
bp->bio_resid = bp->bio_bcount;
biodone(bp);
@ -239,25 +239,26 @@ int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
nvme_cb_fn_t cb_fn)
{
- struct nvme_dsm_range *dsm_range;
+ struct nvme_dsm_range *dsm_range;
+ int err;
bp->bio_driver1 = cb_fn;
switch (bp->bio_cmd) {
case BIO_READ:
- nvme_ns_cmd_read(ns, bp->bio_data,
-     bp->bio_offset/nvme_ns_get_sector_size(ns),
-     bp->bio_bcount/nvme_ns_get_sector_size(ns),
-     nvme_ns_bio_done, bp);
+ err = nvme_ns_cmd_read(ns, bp->bio_data,
+     bp->bio_offset/nvme_ns_get_sector_size(ns),
+     bp->bio_bcount/nvme_ns_get_sector_size(ns),
+     nvme_ns_bio_done, bp);
break;
case BIO_WRITE:
- nvme_ns_cmd_write(ns, bp->bio_data,
-     bp->bio_offset/nvme_ns_get_sector_size(ns),
-     bp->bio_bcount/nvme_ns_get_sector_size(ns),
-     nvme_ns_bio_done, bp);
+ err = nvme_ns_cmd_write(ns, bp->bio_data,
+     bp->bio_offset/nvme_ns_get_sector_size(ns),
+     bp->bio_bcount/nvme_ns_get_sector_size(ns),
+     nvme_ns_bio_done, bp);
break;
case BIO_FLUSH:
- nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
+ err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
break;
case BIO_DELETE:
/*
@ -272,13 +273,17 @@ nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
dsm_range->starting_lba =
bp->bio_offset/nvme_ns_get_sector_size(ns);
bp->bio_driver2 = dsm_range;
- nvme_ns_cmd_deallocate(ns, dsm_range, 1, nvme_ns_bio_done, bp);
+ err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
+     nvme_ns_bio_done, bp);
+ if (err != 0)
+     free(dsm_range, M_NVME);
break;
default:
- return (EIO);
+ err = EIO;
+ break;
}
- return (0);
+ return (err);
}
#ifdef CHATHAM2

View File

@ -29,7 +29,7 @@ __FBSDID("$FreeBSD$");
#include "nvme_private.h"
- void
+ int
nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
@ -40,6 +40,9 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg,
lba_count*512, payload);
+ if (tr == NULL)
+     return (ENOMEM);
cmd = &tr->cmd;
cmd->opc = NVME_OPC_READ;
cmd->nsid = ns->id;
@ -52,9 +55,11 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
tr->payload_size, nvme_payload_map, tr, 0);
KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+ return (0);
}
- void
+ int
nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
@ -65,6 +70,9 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg,
lba_count*512, payload);
+ if (tr == NULL)
+     return (ENOMEM);
cmd = &tr->cmd;
cmd->opc = NVME_OPC_WRITE;
cmd->nsid = ns->id;
@ -77,9 +85,11 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
tr->payload_size, nvme_payload_map, tr, 0);
KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+ return (0);
}
- void
+ int
nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
{
@ -90,6 +100,9 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg,
num_ranges * sizeof(struct nvme_dsm_range), payload);
+ if (tr == NULL)
+     return (ENOMEM);
cmd = &tr->cmd;
cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
cmd->nsid = ns->id;
@ -102,9 +115,11 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
tr->payload_size, nvme_payload_map, tr, 0);
KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+ return (0);
}
- void
+ int
nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
struct nvme_tracker *tr;
@ -112,9 +127,14 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg, 0, NULL);
+ if (tr == NULL)
+     return (ENOMEM);
cmd = &tr->cmd;
cmd->opc = NVME_OPC_FLUSH;
cmd->nsid = ns->id;
nvme_qpair_submit_cmd(tr->qpair, tr);
+ return (0);
}
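
Read together, the hunks above give all four submission functions in this file the same shape. The condensed sketch below restates that pattern in one place; nvme_ns_cmd_example() is a hypothetical name, the body mirrors nvme_ns_cmd_flush(), and per-command details (opcode choice, payload sizing, bus_dmamap_load() and its callback) are elided.

/*
 * Sketch only: the common pattern now shared by nvme_ns_cmd_read(),
 * nvme_ns_cmd_write(), nvme_ns_cmd_deallocate() and nvme_ns_cmd_flush().
 * Tracker allocation is the only failure point reported to the caller,
 * and it is reported as ENOMEM.
 */
int
nvme_ns_cmd_example(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;

	/* May fail when the qpair has no free tracker or malloc() fails. */
	tr = nvme_allocate_tracker(ns->ctrlr, FALSE, cb_fn, cb_arg, 0, NULL);
	if (tr == NULL)
		return (ENOMEM);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_FLUSH;	/* per-command opcode and fields go here */
	cmd->nsid = ns->id;

	nvme_qpair_submit_cmd(tr->qpair, tr);
	return (0);
}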

View File

@ -84,10 +84,24 @@ nvme_qpair_allocate_tracker(struct nvme_qpair *qpair, boolean_t alloc_prp_list)
tr = SLIST_FIRST(&qpair->free_tr);
if (tr == NULL) {
- /* TODO: fail if malloc returns NULL */
+ /*
+  * We can't support more trackers than we have entries in
+  * our queue, because it would generate invalid indices
+  * into the qpair's active tracker array.
+  */
+ if (qpair->num_tr == qpair->num_entries) {
+     mtx_unlock(&qpair->lock);
+     return (NULL);
+ }
tr = malloc(sizeof(struct nvme_tracker), M_NVME,
M_ZERO | M_NOWAIT);
+ if (tr == NULL) {
+     mtx_unlock(&qpair->lock);
+     return (NULL);
+ }
bus_dmamap_create(qpair->dma_tag, 0, &tr->dma_map);
callout_init_mtx(&tr->timer, &qpair->lock, 0);
tr->cid = qpair->num_tr++;

View File

@ -64,6 +64,9 @@ nvme_allocate_tracker_uio(struct nvme_controller *ctrlr, struct uio *uio)
*/
tr = nvme_qpair_allocate_tracker(qpair, TRUE /* alloc_prp_list */);
+ if (tr == NULL)
+     return (NULL);
memset(&tr->cmd, 0, sizeof(tr->cmd));
tr->qpair = qpair;
@ -80,7 +83,7 @@ nvme_payload_map_uio(void *arg, bus_dma_segment_t *seg, int nseg,
nvme_payload_map(arg, seg, nseg, error);
}
- static void
+ static int
nvme_read_uio(struct nvme_namespace *ns, struct uio *uio)
{
struct nvme_tracker *tr;
@ -90,6 +93,9 @@ nvme_read_uio(struct nvme_namespace *ns, struct uio *uio)
tr = nvme_allocate_tracker_uio(ns->ctrlr, uio);
+ if (tr == NULL)
+     return (ENOMEM);
cmd = &tr->cmd;
cmd->opc = NVME_OPC_READ;
cmd->nsid = ns->id;
@ -107,9 +113,11 @@ nvme_read_uio(struct nvme_namespace *ns, struct uio *uio)
nvme_payload_map_uio, tr, 0);
KASSERT(err == 0, ("bus_dmamap_load_uio returned non-zero!\n"));
+ return (0);
}
- static void
+ static int
nvme_write_uio(struct nvme_namespace *ns, struct uio *uio)
{
struct nvme_tracker *tr;
@ -119,6 +127,9 @@ nvme_write_uio(struct nvme_namespace *ns, struct uio *uio)
tr = nvme_allocate_tracker_uio(ns->ctrlr, uio);
+ if (tr == NULL)
+     return (ENOMEM);
cmd = &tr->cmd;
cmd->opc = NVME_OPC_WRITE;
cmd->nsid = ns->id;
@ -136,6 +147,8 @@ nvme_write_uio(struct nvme_namespace *ns, struct uio *uio)
nvme_payload_map_uio, tr, 0);
KASSERT(err == 0, ("bus_dmamap_load_uio returned non-zero!\n"));
+ return (0);
}
int
@ -143,6 +156,7 @@ nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag)
{
struct nvme_namespace *ns;
struct mtx *mtx;
+ int err;
#if __FreeBSD_version > 900017
int ref;
#endif
@ -160,11 +174,12 @@ nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag)
mtx_lock(mtx);
if (uio->uio_rw == UIO_READ)
- nvme_read_uio(ns, uio);
+ err = nvme_read_uio(ns, uio);
else
- nvme_write_uio(ns, uio);
+ err = nvme_write_uio(ns, uio);
- msleep(uio, mtx, PRIBIO, "nvme_physio", 0);
+ if (err == 0)
+     msleep(uio, mtx, PRIBIO, "nvme_physio", 0);
mtx_unlock(mtx);
#if __FreeBSD_version > 900017
@ -173,7 +188,8 @@ nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag)
dev_relthread(dev);
#endif
- uio->uio_resid = 0;
+ if (err == 0)
+     uio->uio_resid = 0;
PRELE(curproc);
return (0);