Add unmapped bio support to nvme(4) and nvd(4).

Sponsored by:	Intel
This commit is contained in:
Jim Harris 2013-04-01 16:23:34 +00:00
parent 20547d41f8
commit 5fdf9c3c8e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=248977
6 changed files with 119 additions and 10 deletions

View File

@ -301,6 +301,11 @@ nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif
strlcpy(disk->d_ident, nvme_ns_get_serial_number(ns),
sizeof(disk->d_ident));

View File

@ -758,9 +758,13 @@ void nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
void *cb_arg);
int nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
nvme_cb_fn_t cb_fn, void *cb_arg);
int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
void *cb_arg);
int nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
nvme_cb_fn_t cb_fn, void *cb_arg);
int nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
uint8_t num_ranges, nvme_cb_fn_t cb_fn,
void *cb_arg);

View File

@ -150,11 +150,17 @@ nvme_ns_strategy(struct bio *bp)
/*
 * Character-device switch for NVMe namespace device nodes.  When the
 * target kernel provides unmapped-bio support (NVME_UNMAPPED_BIO_SUPPORT),
 * D_UNMAPPED_IO is advertised and generic physread/physwrite are used;
 * otherwise reads/writes go through the driver's own nvme_ns_physio.
 */
/*
 * NOTE(review): this hunk appears to interleave removed and added diff
 * lines — .d_open/.d_close and .d_read/.d_write occur both inside the
 * #else branch and after #endif.  Confirm against the post-commit source
 * tree before relying on this exact initializer list.
 */
static struct cdevsw nvme_ns_cdevsw = {
	.d_version =	D_VERSION,
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	.d_flags =	D_DISK | D_UNMAPPED_IO,
	.d_read =	physread,
	.d_write =	physwrite,
#else
	.d_flags =	D_DISK,
	.d_open =	nvme_ns_open,
	.d_close =	nvme_ns_close,
	.d_read =	nvme_ns_physio,
	.d_write =	nvme_ns_physio,
#endif
	.d_open =	nvme_ns_open,
	.d_close =	nvme_ns_close,
	.d_strategy =	nvme_ns_strategy,
	.d_ioctl =	nvme_ns_ioctl
};
@ -233,16 +239,10 @@ nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
switch (bp->bio_cmd) {
case BIO_READ:
err = nvme_ns_cmd_read(ns, bp->bio_data,
bp->bio_offset/nvme_ns_get_sector_size(ns),
bp->bio_bcount/nvme_ns_get_sector_size(ns),
nvme_ns_bio_done, bp);
err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
break;
case BIO_WRITE:
err = nvme_ns_cmd_write(ns, bp->bio_data,
bp->bio_offset/nvme_ns_get_sector_size(ns),
bp->bio_bcount/nvme_ns_get_sector_size(ns),
nvme_ns_bio_done, bp);
err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
break;
case BIO_FLUSH:
err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);

View File

@ -53,6 +53,35 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
return (0);
}
int
nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;
	uint64_t		lba;
	uint64_t		lba_count;

	/*
	 * Attach the bio itself to the request so unmapped bios can be
	 * DMA-loaded later without requiring a KVA mapping of the payload.
	 */
	req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_READ;
	cmd->nsid = ns->id;

	/* Convert the bio's byte-based offset/count into LBA units. */
	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);

	/* TODO: create a read command data structure */
	/*
	 * cdw10/cdw11 together hold the 64-bit starting LBA.  Split it
	 * explicitly instead of type-punning &cdw10 through a uint64_t
	 * pointer, which is strict-aliasing UB and endian-dependent.
	 */
	cmd->cdw10 = (uint32_t)lba;
	cmd->cdw11 = (uint32_t)(lba >> 32);
	/* NLB (cdw12) is zero-based: 0 means one logical block. */
	cmd->cdw12 = lba_count-1;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
int
nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
@ -79,6 +108,35 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
return (0);
}
int
nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;
	uint64_t		lba;
	uint64_t		lba_count;

	/*
	 * Attach the bio itself to the request so unmapped bios can be
	 * DMA-loaded later without requiring a KVA mapping of the payload.
	 */
	req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_WRITE;
	cmd->nsid = ns->id;

	/* Convert the bio's byte-based offset/count into LBA units. */
	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);

	/* TODO: create a write command data structure */
	/*
	 * cdw10/cdw11 together hold the 64-bit starting LBA.  Split it
	 * explicitly instead of type-punning &cdw10 through a uint64_t
	 * pointer, which is strict-aliasing UB and endian-dependent.
	 */
	cmd->cdw10 = (uint32_t)lba;
	cmd->cdw11 = (uint32_t)(lba >> 32);
	/* NLB (cdw12) is zero-based: 0 means one logical block. */
	cmd->cdw12 = lba_count-1;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
int
nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)

View File

@ -30,6 +30,7 @@
#define __NVME_PRIVATE_H__
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
@ -114,6 +115,16 @@ MALLOC_DECLARE(M_NVME);
#define CACHE_LINE_SIZE (64)
#endif
/*
* Use presence of the BIO_UNMAPPED flag to determine whether unmapped I/O
* support and the bus_dmamap_load_bio API are available on the target
* kernel. This will ease porting back to earlier stable branches at a
* later point.
*/
#ifdef BIO_UNMAPPED
#define NVME_UNMAPPED_BIO_SUPPORT
#endif
extern uma_zone_t nvme_request_zone;
extern int32_t nvme_retry_count;
@ -126,6 +137,9 @@ struct nvme_completion_poll_status {
#define NVME_REQUEST_VADDR 1
#define NVME_REQUEST_NULL 2 /* For requests with no payload. */
#define NVME_REQUEST_UIO 3
#ifdef NVME_UNMAPPED_BIO_SUPPORT
#define NVME_REQUEST_BIO 4
#endif
struct nvme_request {
@ -134,6 +148,7 @@ struct nvme_request {
union {
void *payload;
struct uio *uio;
struct bio *bio;
} u;
uint32_t type;
uint32_t payload_size;
@ -527,6 +542,25 @@ nvme_allocate_request_uio(struct uio *uio, nvme_cb_fn_t cb_fn, void *cb_arg)
return (req);
}
/*
 * Wrap a struct bio in a freshly allocated nvme_request.
 *
 * On kernels with unmapped-bio support the bio is attached directly so
 * the payload can be bus_dmamap_load_bio()'d without a kernel virtual
 * mapping; otherwise the request falls back to the bio's mapped data
 * pointer and byte count.  Returns NULL if allocation fails.
 */
static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req == NULL)
		return (NULL);

#ifdef NVME_UNMAPPED_BIO_SUPPORT
	req->type = NVME_REQUEST_BIO;
	req->u.bio = bio;
#else
	req->type = NVME_REQUEST_VADDR;
	req->u.payload = bio->bio_data;
	req->payload_size = bio->bio_bcount;
#endif

	return (req);
}
#define nvme_free_request(req) uma_zfree(nvme_request_zone, req)
void nvme_notify_async_consumers(struct nvme_controller *ctrlr,

View File

@ -757,6 +757,14 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
if (err != 0)
panic("bus_dmamap_load_uio returned non-zero!\n");
break;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
case NVME_REQUEST_BIO:
err = bus_dmamap_load_bio(tr->qpair->dma_tag,
tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
if (err != 0)
panic("bus_dmamap_load_bio returned non-zero!\n");
break;
#endif
default:
panic("unknown nvme request type 0x%x\n", req->type);
break;