nvme: Add functions spdk_nvme_ns_cmd_readv/writev_ext

These functions accept an extendable structure with I/O request options.
The options structure contains a memory domain that can be used to
translate or fetch data, a metadata pointer, and end-to-end data
protection parameters.
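
Below is a minimal usage sketch (not part of this commit) showing how a caller
might fill the options structure and submit an extended write. The write_io
context, the SGL callbacks and the tag values are hypothetical application
code; the options live in the per-I/O context because they must stay
accessible until the completion callback runs.

#include <sys/uio.h>

#include "spdk/nvme.h"

/* Per-I/O context owned by the application. The embedded ext_io_opts must
 * remain valid until the completion callback fires. */
struct write_io {
	struct spdk_nvme_ns_cmd_ext_io_opts opts;
	struct iovec *iov;
	int iovcnt;
	int iovpos;
};

static void
write_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct write_io *io = cb_arg;

	/* Sketch: payloads always start at offset 0. */
	io->iovpos = 0;
}

static int
write_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct write_io *io = cb_arg;
	struct iovec *iov = &io->iov[io->iovpos++];

	*address = iov->iov_base;
	*length = iov->iov_len;
	return 0;
}

static void
write_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	/* Check spdk_nvme_cpl_is_error(cpl) and release the write_io here. */
}

static int
submit_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		  struct write_io *io, void *md_buf,
		  uint64_t lba, uint32_t lba_count)
{
	memset(&io->opts, 0, sizeof(io->opts));
	io->opts.size = sizeof(io->opts);
	io->opts.metadata = md_buf;                          /* separate metadata buffer */
	io->opts.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; /* end-to-end reference tag check */
	io->opts.apptag_mask = 0xffff;
	io->opts.apptag = 0x0100;

	return spdk_nvme_ns_cmd_writev_ext(ns, qpair, lba, lba_count,
					   write_done, io,
					   write_reset_sgl, write_next_sge,
					   &io->opts);
}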

Change-Id: I65bfba279904e77539348520c3dfac7aadbe80d9
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6270
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Alexey Marchuk 2021-01-09 17:06:31 +03:00 committed by Tomasz Zawadzki
parent a422d8b06f
commit 110335f192
8 changed files with 504 additions and 52 deletions


@@ -19,6 +19,11 @@ log level.
New API `spdk_nvme_ctrlr_get_memory_domain` has been added; it allows getting the SPDK memory domain used by an NVMe controller.
New API functions `spdk_nvme_ns_cmd_readv_ext` and `spdk_nvme_ns_cmd_writev_ext`
have been added. These functions accept an `spdk_nvme_ns_cmd_ext_io_opts` structure with extended IO request
options, e.g. a DMA memory domain which describes data that may belong to another memory domain and
can't be accessed directly.
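
For illustration only (this block is not part of CHANGELOG.md or of this patch):
a sketch combining the two new APIs, assuming `spdk_nvme_ctrlr_get_memory_domain()`
returns the controller's `struct spdk_memory_domain *` (or NULL when the transport
has none). `read_done`, `app_reset_sgl`, `app_next_sge` and `submit_foreign_read`
are hypothetical application code.

#include "spdk/nvme.h"

/* Hypothetical application callbacks with the signatures required by
 * spdk_nvme_ns_cmd_readv_ext(). */
static void read_done(void *cb_arg, const struct spdk_nvme_cpl *cpl);
static void app_reset_sgl(void *cb_arg, uint32_t offset);
static int app_next_sge(void *cb_arg, void **address, uint32_t *length);

static int
submit_foreign_read(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns,
		    struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_ns_cmd_ext_io_opts *opts, /* kept alive by the caller */
		    void *sgl_ctx, void *domain_ctx,
		    uint64_t lba, uint32_t lba_count)
{
	struct spdk_memory_domain *domain = spdk_nvme_ctrlr_get_memory_domain(ctrlr);

	if (domain == NULL) {
		/* The transport cannot translate foreign memory; the caller
		 * would have to stage the data in locally accessible buffers. */
		return -ENOTSUP;
	}

	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = domain;        /* payload described by another memory domain */
	opts->memory_domain_ctx = domain_ctx;

	return spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count,
					  read_done, sgl_ctx,
					  app_reset_sgl, app_next_sge, opts);
}
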
## v21.07:
### accel_fw


@@ -521,6 +521,27 @@ enum spdk_nvme_ctrlr_flags {
SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED = 1 << 6, /**< The Directives is supported */
};
/**
* Structure with optional IO request parameters
*/
struct spdk_nvme_ns_cmd_ext_io_opts {
/** size of this structure in bytes */
size_t size;
/** Memory domain which describes data payload in IO request. The controller must support
* the corresponding memory domain type, refer to \ref spdk_nvme_ctrlr_get_memory_domain */
struct spdk_memory_domain *memory_domain;
/** User context to be passed to memory domain operations */
void *memory_domain_ctx;
/** Flags for this IO, defined in nvme_spec.h */
uint32_t io_flags;
/** Virtual address pointer to the metadata payload, the length of metadata is specified by \ref spdk_nvme_ns_get_md_size */
void *metadata;
/** Application tag mask to use end-to-end protection information. */
uint16_t apptag_mask;
/** Application tag to use end-to-end protection information. */
uint16_t apptag;
};
/**
* Parse the string representation of a transport ID.
*
@@ -2897,6 +2918,39 @@ int spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qp
spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
uint16_t apptag_mask, uint16_t apptag);
/**
* Submit a write I/O to the specified NVMe namespace.
*
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any
* given time.
*
* \param ns NVMe namespace to submit the write I/O
* \param qpair I/O queue pair to submit the request
* \param lba starting LBA to write the data
* \param lba_count length (in sectors) for the write operation
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param reset_sgl_fn callback function to reset scattered payload
* \param next_sge_fn callback function to iterate each scattered
* payload memory segment
* \param opts Optional structure with extended IO request options. If provided, the caller must
* guarantee that this structure is accessible until IO completes
*
* \return 0 if successfully submitted, negated errnos on the following error conditions:
* -EINVAL: The request is malformed.
* -ENOMEM: The request cannot be allocated.
* -ENXIO: The qpair is failed at the transport level.
* -EFAULT: Invalid address was specified as part of payload. cb_fn is also called
* with error status including dnr=1 in this case.
*/
int spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn,
struct spdk_nvme_ns_cmd_ext_io_opts *opts);
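
A non-authoritative sketch (not part of nvme.h or of this patch) of how a caller
might react to the return codes documented above; `app_requeue()` is a hypothetical
helper that resubmits the I/O after completions have been polled.

#include "spdk/log.h"
#include "spdk/nvme.h"

static void app_requeue(struct spdk_nvme_qpair *qpair); /* hypothetical */

static void
handle_writev_ext_rc(struct spdk_nvme_qpair *qpair, int rc)
{
	switch (rc) {
	case 0:
		break;                  /* submitted; result arrives via cb_fn */
	case -ENOMEM:
		app_requeue(qpair);     /* no free request objects; retry later */
		break;
	case -EFAULT:
		/* Invalid payload address: cb_fn has already been invoked with
		 * an error status (dnr=1), so clean up in the completion path. */
		break;
	default:
		SPDK_ERRLOG("writev_ext submission failed, rc %d\n", rc);
		break;
	}
}
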
/**
* Submit a write I/O to the specified NVMe namespace.
*
@@ -3077,6 +3131,37 @@ int spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpa
spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
uint16_t apptag_mask, uint16_t apptag);
/**
* Submit a read I/O to the specified NVMe namespace.
*
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*
* \param ns NVMe namespace to submit the read I/O
* \param qpair I/O queue pair to submit the request
* \param lba starting LBA to read the data
* \param lba_count length (in sectors) for the read operation
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param reset_sgl_fn callback function to reset scattered payload
* \param next_sge_fn callback function to iterate each scattered
* payload memory segment
* \param opts Optional structure with extended IO request options. If provided, the caller must
* guarantee that this structure is accessible until IO completes
*
* \return 0 if successfully submitted, negated errnos on the following error conditions:
* -EINVAL: The request is malformed.
* -ENOMEM: The request cannot be allocated.
* -ENXIO: The qpair is failed at the transport level.
* -EFAULT: Invalid address was specified as part of payload. cb_fn is also called
* with error status including dnr=1 in this case.
*/
int spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn,
struct spdk_nvme_ns_cmd_ext_io_opts *opts);
/**
* Submits a read I/O to the specified NVMe namespace.
*


@@ -226,6 +226,10 @@ struct nvme_payload {
spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
spdk_nvme_req_next_sge_cb next_sge_fn;
/**
* Extended IO options passed by the user
*/
struct spdk_nvme_ns_cmd_ext_io_opts *opts;
/**
* If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
* virtual memory address of a single virtually contiguous buffer.


@@ -4,6 +4,7 @@
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -740,6 +741,49 @@ spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
}
}
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn,
struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
struct nvme_request *req;
struct nvme_payload payload;
int rc = 0;
if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
return -EINVAL;
}
payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
if (opts) {
if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
return -EINVAL;
}
payload.opts = opts;
payload.md = opts->metadata;
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
opts->io_flags, opts->apptag_mask, opts->apptag, true, &rc);
} else {
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
0, 0, 0, true, &rc);
}
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else {
return nvme_ns_map_failure_rc(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests,
rc);
}
}
int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
void *buffer, uint64_t lba,
@@ -992,6 +1036,49 @@ spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
}
}
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn,
struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
struct nvme_request *req;
struct nvme_payload payload;
int rc = 0;
if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
return -EINVAL;
}
payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
if (opts) {
if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
return -EINVAL;
}
payload.opts = opts;
payload.md = opts->metadata;
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
opts->io_flags, opts->apptag_mask, opts->apptag, true, &rc);
} else {
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
0, 0, 0, true, &rc);
}
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else {
return nvme_ns_map_failure_rc(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests,
rc);
}
}
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,


@@ -278,6 +278,13 @@ struct spdk_nvme_rdma_rsp {
struct nvme_rdma_wr rdma_wr;
};
struct nvme_rdma_memory_translation_ctx {
void *addr;
size_t length;
uint32_t lkey;
uint32_t rkey;
};
static const char *rdma_cm_event_str[] = {
"RDMA_CM_EVENT_ADDR_RESOLVED",
"RDMA_CM_EVENT_ADDR_ERROR",
@@ -1340,6 +1347,55 @@ nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qp
return rc;
}
static inline int
nvme_rdma_get_memory_translation(struct nvme_request *req, struct nvme_rdma_qpair *rqpair,
struct nvme_rdma_memory_translation_ctx *_ctx)
{
struct spdk_memory_domain_translation_ctx ctx;
struct spdk_memory_domain_translation_result dma_translation;
struct spdk_rdma_memory_translation rdma_translation;
int rc;
assert(req);
assert(rqpair);
assert(_ctx);
if (req->payload.opts && req->payload.opts->memory_domain) {
ctx.size = sizeof(struct spdk_memory_domain_translation_ctx);
ctx.rdma.ibv_qp = rqpair->rdma_qp->qp;
dma_translation.size = sizeof(struct spdk_memory_domain_translation_result);
rc = spdk_memory_domain_translate_data(req->payload.opts->memory_domain,
req->payload.opts->memory_domain_ctx,
rqpair->memory_domain->domain, &ctx, _ctx->addr,
_ctx->length, &dma_translation);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("DMA memory translation failed, rc %d\n", rc);
return rc;
}
_ctx->lkey = dma_translation.rdma.lkey;
_ctx->rkey = dma_translation.rdma.rkey;
_ctx->addr = dma_translation.addr;
_ctx->length = dma_translation.len;
} else {
rc = spdk_rdma_get_translation(rqpair->mr_map, _ctx->addr, _ctx->length, &rdma_translation);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("RDMA memory translation failed, rc %d\n", rc);
return rc;
}
if (rdma_translation.translation_type == SPDK_RDMA_TRANSLATION_MR) {
_ctx->lkey = rdma_translation.mr_or_key.mr->lkey;
_ctx->rkey = rdma_translation.mr_or_key.mr->rkey;
} else {
_ctx->lkey = _ctx->rkey = (uint32_t)rdma_translation.mr_or_key.key;
}
}
return 0;
}
/*
* Build SGL describing empty payload.
*/
@@ -1376,21 +1432,21 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
struct spdk_nvme_rdma_req *rdma_req)
{
struct nvme_request *req = rdma_req->req;
struct spdk_rdma_memory_translation mem_translation;
void *payload;
struct nvme_rdma_memory_translation_ctx ctx = {
.addr = req->payload.contig_or_cb_arg + req->payload_offset,
.length = req->payload_size
};
int rc;
payload = req->payload.contig_or_cb_arg + req->payload_offset;
assert(req->payload_size != 0);
assert(ctx.length != 0);
assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
rc = spdk_rdma_get_translation(rqpair->mr_map, payload, req->payload_size, &mem_translation);
rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
return -1;
}
rdma_req->send_sgl[1].lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
rdma_req->send_sgl[1].lkey = ctx.lkey;
/* The first element of this SGL is pointing at an
* spdk_nvmf_cmd object. For this particular command,
@@ -1398,8 +1454,8 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
* the NVMe command. */
rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);
rdma_req->send_sgl[1].addr = (uint64_t)payload;
rdma_req->send_sgl[1].length = (uint32_t)req->payload_size;
rdma_req->send_sgl[1].addr = (uint64_t)ctx.addr;
rdma_req->send_sgl[1].length = (uint32_t)ctx.length;
/* The RDMA SGL contains two elements. The first describes
* the NVMe command and the second describes the data
@@ -1409,7 +1465,7 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)req->payload_size;
req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)ctx.length;
/* Inline only supported for icdoff == 0 currently. This function will
* not get called for controllers with other values. */
req->cmd.dptr.sgl1.address = (uint64_t)0;
@@ -1425,8 +1481,10 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
struct spdk_nvme_rdma_req *rdma_req)
{
struct nvme_request *req = rdma_req->req;
void *payload = req->payload.contig_or_cb_arg + req->payload_offset;
struct spdk_rdma_memory_translation mem_translation;
struct nvme_rdma_memory_translation_ctx ctx = {
.addr = req->payload.contig_or_cb_arg + req->payload_offset,
.length = req->payload_size
};
int rc;
assert(req->payload_size != 0);
@@ -1438,13 +1496,12 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
return -1;
}
rc = spdk_rdma_get_translation(rqpair->mr_map, payload, req->payload_size, &mem_translation);
rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
return -1;
}
req->cmd.dptr.sgl1.keyed.key = spdk_rdma_memory_translation_get_rkey(&mem_translation);
req->cmd.dptr.sgl1.keyed.key = ctx.rkey;
/* The first element of this SGL is pointing at an
* spdk_nvmf_cmd object. For this particular command,
@@ -1458,8 +1515,8 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
req->cmd.dptr.sgl1.keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
req->cmd.dptr.sgl1.keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
req->cmd.dptr.sgl1.keyed.length = req->payload_size;
req->cmd.dptr.sgl1.address = (uint64_t)payload;
req->cmd.dptr.sgl1.keyed.length = (uint32_t)ctx.length;
req->cmd.dptr.sgl1.address = (uint64_t)ctx.addr;
return 0;
}
@@ -1473,8 +1530,7 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
{
struct nvme_request *req = rdma_req->req;
struct spdk_nvmf_cmd *cmd = &rqpair->cmds[rdma_req->id];
struct spdk_rdma_memory_translation mem_translation;
void *virt_addr;
struct nvme_rdma_memory_translation_ctx ctx;
uint32_t remaining_size;
uint32_t sge_length;
int rc, max_num_sgl, num_sgl_desc;
@@ -1490,7 +1546,7 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
remaining_size = req->payload_size;
num_sgl_desc = 0;
do {
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &sge_length);
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &ctx.addr, &sge_length);
if (rc) {
return -1;
}
@@ -1502,19 +1558,19 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
sge_length, NVME_RDMA_MAX_KEYED_SGL_LENGTH);
return -1;
}
rc = spdk_rdma_get_translation(rqpair->mr_map, virt_addr, sge_length, &mem_translation);
ctx.length = sge_length;
rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
return -1;
}
cmd->sgl[num_sgl_desc].keyed.key = spdk_rdma_memory_translation_get_rkey(&mem_translation);
cmd->sgl[num_sgl_desc].keyed.key = ctx.rkey;
cmd->sgl[num_sgl_desc].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
cmd->sgl[num_sgl_desc].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
cmd->sgl[num_sgl_desc].keyed.length = sge_length;
cmd->sgl[num_sgl_desc].address = (uint64_t)virt_addr;
cmd->sgl[num_sgl_desc].keyed.length = (uint32_t)ctx.length;
cmd->sgl[num_sgl_desc].address = (uint64_t)ctx.addr;
remaining_size -= sge_length;
remaining_size -= ctx.length;
num_sgl_desc++;
} while (remaining_size > 0 && num_sgl_desc < max_num_sgl);
@@ -1577,9 +1633,8 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
struct spdk_nvme_rdma_req *rdma_req)
{
struct nvme_request *req = rdma_req->req;
struct spdk_rdma_memory_translation mem_translation;
struct nvme_rdma_memory_translation_ctx ctx;
uint32_t length;
void *virt_addr;
int rc;
assert(req->payload_size != 0);
@@ -1588,7 +1643,7 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
assert(req->payload.next_sge_fn != NULL);
req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &ctx.addr, &length);
if (rc) {
return -1;
}
@@ -1602,15 +1657,15 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
length = req->payload_size;
}
rc = spdk_rdma_get_translation(rqpair->mr_map, virt_addr, length, &mem_translation);
ctx.length = length;
rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
return -1;
}
rdma_req->send_sgl[1].addr = (uint64_t)virt_addr;
rdma_req->send_sgl[1].length = length;
rdma_req->send_sgl[1].lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
rdma_req->send_sgl[1].addr = (uint64_t)ctx.addr;
rdma_req->send_sgl[1].length = (uint32_t)ctx.length;
rdma_req->send_sgl[1].lkey = ctx.lkey;
rdma_req->send_wr.num_sge = 2;
@@ -1623,7 +1678,7 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)req->payload_size;
req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)ctx.length;
/* Inline only supported for icdoff == 0 currently. This function will
* not get called for controllers with other values. */
req->cmd.dptr.sgl1.address = (uint64_t)0;


@@ -155,6 +155,8 @@
spdk_nvme_ns_cmd_comparev;
spdk_nvme_ns_cmd_comparev_with_md;
spdk_nvme_ns_cmd_compare_with_md;
spdk_nvme_ns_cmd_writev_ext;
spdk_nvme_ns_cmd_readv_ext;
spdk_nvme_qpair_get_optimal_poll_group;
spdk_nvme_qpair_process_completions;


@@ -2089,6 +2089,146 @@ test_spdk_nvme_ns_cmd_readv_with_md(void)
cleanup_after_test(&qpair);
}
static void
test_spdk_nvme_ns_cmd_writev_ext(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct spdk_nvme_ns_cmd_ext_io_opts ext_opts = {
.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts),
.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
.memory_domain_ctx = (void *)0xf00df00d,
.metadata = (void *)0xdeadbeef,
.apptag_mask = 0xf,
.apptag = 0xff
};
int rc = 0;
uint32_t lba_count = 256;
uint32_t sector_size = 512;
uint32_t md_size = 128;
uint64_t sge_length = lba_count * sector_size;
prepare_for_test(&ns, &ctrlr, &qpair, sector_size,
md_size, 128 * 1024, 0, false);
/* Invalid io_flags. Expect fail */
ext_opts.io_flags = 0xFFFF000F;
rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, nvme_request_reset_sgl,
nvme_request_next_sge, &ext_opts);
CU_ASSERT(rc != 0);
ext_opts.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
/* Empty reset_sgl cb. Expect fail */
rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, NULL,
nvme_request_next_sge, &ext_opts);
CU_ASSERT(rc != 0);
/* Empty next_sgl cb. Expect fail */
rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, nvme_request_reset_sgl,
NULL, &ext_opts);
CU_ASSERT(rc != 0);
/* Expect pass */
rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, nvme_request_reset_sgl,
nvme_request_next_sge, &ext_opts);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
CU_ASSERT(g_request->payload.md == (void *)0xDEADBEEF);
CU_ASSERT(g_request->payload.opts == &ext_opts);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK) == ext_opts.io_flags);
CU_ASSERT(g_request->cmd.cdw15 >> 16 == ext_opts.apptag_mask);
CU_ASSERT((g_request->cmd.cdw15 & 0xff) == ext_opts.apptag);
CU_ASSERT(g_request->payload_size == 256 * 512);
CU_ASSERT(g_request->qpair == &qpair);
CU_ASSERT(g_request->md_offset == 0);
CU_ASSERT(g_request->payload_offset == 0);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
}
static void
test_spdk_nvme_ns_cmd_readv_ext(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct spdk_nvme_ns_cmd_ext_io_opts ext_opts = {
.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts),
.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
.memory_domain_ctx = (void *)0xf00df00d,
.metadata = (void *)0xdeadbeef,
.apptag_mask = 0xf,
.apptag = 0xff
};
int rc = 0;
uint32_t lba_count = 256;
uint32_t sector_size = 512;
uint32_t md_size = 128;
uint64_t sge_length = lba_count * sector_size;
prepare_for_test(&ns, &ctrlr, &qpair, sector_size,
md_size, 128 * 1024, 0, false);
/* Invalid io_flags. Expect fail */
ext_opts.io_flags = 0xFFFF000F;
rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, nvme_request_reset_sgl,
nvme_request_next_sge, &ext_opts);
CU_ASSERT(rc != 0);
ext_opts.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
/* Empty reset_sgl cb. Expect fail */
rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, NULL,
nvme_request_next_sge, &ext_opts);
CU_ASSERT(rc != 0);
/* Empty next_sgl cb. Expect fail */
rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, nvme_request_reset_sgl,
NULL, &ext_opts);
CU_ASSERT(rc != 0);
/* Expect pass */
rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
NULL, &sge_length, nvme_request_reset_sgl,
nvme_request_next_sge, &ext_opts);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
CU_ASSERT(g_request->payload.md == (void *)0xDEADBEEF);
CU_ASSERT(g_request->payload.opts == &ext_opts);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK) == ext_opts.io_flags);
CU_ASSERT(g_request->cmd.cdw15 >> 16 == ext_opts.apptag_mask);
CU_ASSERT((g_request->cmd.cdw15 & 0xff) == ext_opts.apptag);
CU_ASSERT(g_request->payload_size == 256 * 512);
CU_ASSERT(g_request->qpair == &qpair);
CU_ASSERT(g_request->md_offset == 0);
CU_ASSERT(g_request->payload_offset == 0);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
@@ -2126,6 +2266,8 @@ int main(int argc, char **argv)
CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev_with_md);
CU_ADD_TEST(suite, test_nvme_ns_cmd_setup_request);
CU_ADD_TEST(suite, test_spdk_nvme_ns_cmd_readv_with_md);
CU_ADD_TEST(suite, test_spdk_nvme_ns_cmd_writev_ext);
CU_ADD_TEST(suite, test_spdk_nvme_ns_cmd_readv_ext);
g_spdk_nvme_driver = &_g_nvme_driver;


@@ -62,6 +62,15 @@ DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));
DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
DEFINE_STUB(spdk_memory_domain_get_context, struct spdk_memory_domain_ctx *,
(struct spdk_memory_domain *device), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
(struct spdk_memory_domain *device), SPDK_DMA_DEVICE_TYPE_RDMA);
DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *device));
DEFINE_STUB(spdk_memory_domain_fetch_data, int, (struct spdk_memory_domain *src_domain,
void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
uint32_t dst_iov_cnt, spdk_memory_domain_fetch_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);
DEFINE_RETURN_MOCK(spdk_memory_domain_create, int);
int
spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
@@ -76,18 +85,21 @@ spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_devi
return 0;
}
DEFINE_STUB(spdk_memory_domain_get_context, struct spdk_memory_domain_ctx *,
(struct spdk_memory_domain *device), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
(struct spdk_memory_domain *device), SPDK_DMA_DEVICE_TYPE_RDMA);
DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *device));
DEFINE_STUB(spdk_memory_domain_fetch_data, int, (struct spdk_memory_domain *src_domain,
void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
uint32_t dst_iov_cnt, spdk_memory_domain_fetch_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);
DEFINE_STUB(spdk_memory_domain_translate_data, int, (struct spdk_memory_domain *src_domain,
void *src_domain_ctx, struct spdk_memory_domain *dst_domain,
struct spdk_memory_domain_translation_ctx *dst_domain_ctx, void *addr, size_t len,
struct spdk_memory_domain_translation_result *result), 0);
static struct spdk_memory_domain_translation_result g_memory_translation_translation = {.size = sizeof(struct spdk_memory_domain_translation_result) };
DEFINE_RETURN_MOCK(spdk_memory_domain_translate_data, int);
int
spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
HANDLE_RETURN_MOCK(spdk_memory_domain_translate_data);
memcpy(result, &g_memory_translation_translation, sizeof(g_memory_translation_translation));
return 0;
}
/* ibv_reg_mr can be a macro, need to undefine it */
#ifdef ibv_reg_mr
@@ -109,6 +121,7 @@ ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
struct nvme_rdma_ut_bdev_io {
struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
int iovpos;
int iovcnt;
};
DEFINE_RETURN_MOCK(rdma_get_devices, struct ibv_context **);
@@ -172,6 +185,10 @@ static int nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
if (bio->iovpos == bio->iovcnt) {
return -1;
}
iov = &bio->iovs[bio->iovpos];
*address = iov->iov_base;
@@ -189,7 +206,7 @@ test_nvme_rdma_build_sgl_request(void)
struct spdk_nvmf_cmd cmd = {{0}};
struct spdk_nvme_rdma_req rdma_req = {0};
struct nvme_request req = {{0}};
struct nvme_rdma_ut_bdev_io bio;
struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
uint64_t i;
int rc;
@ -211,7 +228,7 @@ test_nvme_rdma_build_sgl_request(void)
req.qpair = &rqpair.qpair;
for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
bio.iovs[i].iov_base = (void *)i;
bio.iovs[i].iov_base = (void *)i + 1;
bio.iovs[i].iov_len = 0;
}
@ -264,12 +281,14 @@ test_nvme_rdma_build_sgl_request(void)
/* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
bio.iovpos = 0;
bio.iovcnt = 4;
req.payload_offset = 0;
req.payload_size = 0x6000;
g_mr_size = 0x0;
rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
SPDK_CU_ASSERT_FATAL(rc != 0);
CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);
CU_ASSERT(bio.iovpos == bio.iovcnt);
bio.iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS;
/* Test case 5: SGL length exceeds 3 bytes. Expected: FAIL */
req.payload_size = 0x1000 + (1 << 24);
@@ -298,7 +317,7 @@ test_nvme_rdma_build_sgl_inline_request(void)
struct spdk_nvmf_cmd cmd = {{0}};
struct spdk_nvme_rdma_req rdma_req = {0};
struct nvme_request req = {{0}};
struct nvme_rdma_ut_bdev_io bio;
struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
int rc;
ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
@@ -796,7 +815,7 @@ test_nvme_rdma_req_init(void)
struct spdk_nvmf_cmd cmd = {};
struct spdk_nvme_rdma_req rdma_req = {};
struct nvme_request req = {};
struct nvme_rdma_ut_bdev_io bio = {};
struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
int rc = 1;
ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
@@ -1215,6 +1234,58 @@ test_rdma_ctrlr_get_memory_domain(void)
CU_ASSERT(nvme_rdma_ctrlr_get_memory_domain(&rctrlr.ctrlr) == domain);
}
static void
test_rdma_get_memory_translation(void)
{
struct ibv_qp qp = { .pd = (struct ibv_pd *)0xfeedbeef };
struct spdk_rdma_qp rdma_qp = { .qp = &qp };
struct nvme_rdma_qpair rqpair = { .rdma_qp = &rdma_qp };
struct spdk_nvme_ns_cmd_ext_io_opts io_opts = {
.memory_domain = (struct spdk_memory_domain *)0xdeaddead
};
struct nvme_request req = { .payload = { .opts = &io_opts } };
struct nvme_rdma_memory_translation_ctx ctx = {
.addr = (void *)0xBAADF00D,
.length = 0x100
};
int rc;
rqpair.memory_domain = nvme_rdma_get_memory_domain(rqpair.rdma_qp->qp->pd);
SPDK_CU_ASSERT_FATAL(rqpair.memory_domain != NULL);
/* case 1, using extended IO opts with DMA device.
* Test 1 - spdk_dma_translate_data error, expect fail */
MOCK_SET(spdk_memory_domain_translate_data, -1);
rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
CU_ASSERT(rc != 0);
MOCK_CLEAR(spdk_memory_domain_translate_data);
/* Test 2 - expect pass */
g_memory_translation_translation.rdma.lkey = 123;
g_memory_translation_translation.rdma.rkey = 321;
rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
CU_ASSERT(rc == 0);
CU_ASSERT(ctx.lkey == g_memory_translation_translation.rdma.lkey);
CU_ASSERT(ctx.rkey == g_memory_translation_translation.rdma.rkey);
/* case 2, using rdma translation
* Test 1 - spdk_rdma_get_translation error, expect fail */
req.payload.opts = NULL;
MOCK_SET(spdk_rdma_get_translation, -1);
rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
CU_ASSERT(rc != 0);
MOCK_CLEAR(spdk_rdma_get_translation);
/* Test 2 - expect pass */
rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
CU_ASSERT(rc == 0);
CU_ASSERT(ctx.lkey == RDMA_UT_LKEY);
CU_ASSERT(ctx.rkey == RDMA_UT_RKEY);
/* Cleanup */
nvme_rdma_put_memory_domain(rqpair.memory_domain);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
@@ -1245,6 +1316,7 @@ int main(int argc, char **argv)
CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);
CU_ADD_TEST(suite, test_nvme_rdma_memory_domain);
CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domain);
CU_ADD_TEST(suite, test_rdma_get_memory_translation);
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();