lib/nvmf: Added support for ZCOPY

Implemented nvmf code to allow transports to use ZCOPY. Note that ZCOPY has to
be enabled within the individual transport layer.

Signed-off-by: matthewb <matthew.burbridge@hpe.com>
Change-Id: I273b3d4ab44d882c916ac39e821505e1f4211ded
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6817
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Xiaodong Liu <xiaodong.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
parent 5a169179be
commit 5818b42fce
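For orientation, a minimal sketch of the request lifecycle this change makes possible, assuming the request has already been marked ZCOPY-capable (zcopy_phase set to NVMF_ZCOPY_PHASE_INIT) and that the public declarations added below live in spdk/nvmf_transport.h; the my_transport_* names are hypothetical, only the spdk_nvmf_* calls and phase values come from this commit:

#include "spdk/nvmf_transport.h"

/* Hypothetical fallback path: a real transport would allocate buffers from its
 * own pool here before executing the request as it did prior to this change. */
static void
my_transport_use_normal_buffers(struct spdk_nvmf_request *req)
{
    spdk_nvmf_request_exec(req);
}

/* Hypothetical submission path for a READ/WRITE that is eligible for ZCOPY. */
static void
my_transport_submit_io(struct spdk_nvmf_request *req)
{
    /* Borrow the bdev's buffers instead of allocating transport buffers.
     * A non-zero return means ZCOPY could not be started and the request has
     * reverted to NVMF_ZCOPY_PHASE_NONE, so use the normal buffer path. */
    if (spdk_nvmf_request_zcopy_start(req) != 0) {
        my_transport_use_normal_buffers(req);
        return;
    }

    /* The start completion arrives through the transport's req_complete
     * callback with req->zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE and the bdev
     * buffers exposed via req->iov/req->iovcnt.  After the data has been
     * moved, the transport calls spdk_nvmf_request_exec(req) and finally
     * spdk_nvmf_request_zcopy_end(req) to commit or release the buffers. */
}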
@@ -63,6 +63,9 @@ the uint128 related data.

Removed deprecated `net` library.

Added support for ZCOPY. ZCOPY is used in preference to READ and WRITE if the
bdev module supports ZCOPY and the bdev module has ZCOPY enabled.

### nvme

`spdk_nvme_map_prps` and `spdk_nvme_map_cmd` were moved to nvmf/vfio-user as internal APIs

@@ -78,6 +78,21 @@ struct spdk_nvmf_dif_info {
    uint32_t orig_length;
};

enum spdk_nvmf_zcopy_phase {
    NVMF_ZCOPY_PHASE_NONE,        /* Request is not using ZCOPY */
    NVMF_ZCOPY_PHASE_INIT,        /* Requesting Buffers */
    NVMF_ZCOPY_PHASE_EXECUTE,     /* Got buffers processing commands */
    NVMF_ZCOPY_PHASE_END_PENDING, /* Releasing buffers */
    NVMF_ZCOPY_PHASE_COMPLETE,    /* Buffers Released */
    NVMF_ZCOPY_PHASE_INIT_FAILED  /* Failed to get the buffers */
};

static inline bool
spdk_nvmf_using_zcopy(enum spdk_nvmf_zcopy_phase phase)
{
    return (phase != NVMF_ZCOPY_PHASE_NONE);
}

struct spdk_nvmf_request {
    struct spdk_nvmf_qpair      *qpair;
    uint32_t                    length;
@@ -101,6 +116,8 @@ struct spdk_nvmf_request {
    struct spdk_nvmf_request    *first_fused_req;
    struct spdk_nvmf_request    *req_to_abort;
    struct spdk_poller          *poller;
    struct spdk_bdev_io         *zcopy_bdev_io; /* Contains the bdev_io when using ZCOPY */
    enum spdk_nvmf_zcopy_phase  zcopy_phase;

    TAILQ_ENTRY(spdk_nvmf_request) link;
};
@@ -426,6 +443,8 @@ void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
void spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req);
int spdk_nvmf_request_free(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);
int spdk_nvmf_request_zcopy_start(struct spdk_nvmf_request *req);
int spdk_nvmf_request_zcopy_end(struct spdk_nvmf_request *req);

/**
 * Remove the given qpair from the poll group.
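The new zcopy_phase field also lets a transport's completion path tell buffer-acquisition and buffer-release completions apart from ordinary request completions. A hedged sketch of such a hook; the callback name is hypothetical and only spdk_nvmf_using_zcopy() and the phase values declared above come from this change:

#include "spdk/nvmf_transport.h"

static int
my_transport_req_complete(struct spdk_nvmf_request *req)
{
    if (!spdk_nvmf_using_zcopy(req->zcopy_phase)) {
        /* Ordinary request: send the completion capsule as before. */
        return 0;
    }

    switch (req->zcopy_phase) {
    case NVMF_ZCOPY_PHASE_INIT_FAILED:
        /* Buffer acquisition failed; retry with transport-owned buffers. */
        break;
    case NVMF_ZCOPY_PHASE_EXECUTE:
        /* Buffers are attached to req->iov; start the data transfer. */
        break;
    case NVMF_ZCOPY_PHASE_COMPLETE:
        /* Buffers released by spdk_nvmf_request_zcopy_end(); respond now. */
        break;
    default:
        break;
    }
    return 0;
}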
lib/nvmf/ctrlr.c (203 changed lines)

@@ -775,11 +775,20 @@ nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req)
    return &req->qpair->group->sgroups[subsystem->id];
}

static void
nvmf_add_to_outstanding_queue(struct spdk_nvmf_request *req)
{
    if (!spdk_nvmf_using_zcopy(req->zcopy_phase)) {
        /* if using zcopy then request has been added when the start zcopy was actioned */
        struct spdk_nvmf_qpair *qpair = req->qpair;
        TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
    }
}

int
spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
    struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
    struct spdk_nvmf_qpair *qpair = req->qpair;
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    enum spdk_nvmf_request_exec_status status;

@@ -791,7 +800,7 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
    }

    sgroup->mgmt_io_outstanding++;
-   TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
+   nvmf_add_to_outstanding_queue(req);

    status = _nvmf_ctrlr_connect(req);

@@ -3516,6 +3525,112 @@ nvmf_ctrlr_process_io_fused_cmd(struct spdk_nvmf_request *req, struct spdk_bdev
    return rc;
}

bool
nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req)
{
    struct spdk_nvmf_ns *ns;

    req->zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

    if (nvmf_qpair_is_admin_queue(req->qpair)) {
        /* Admin queue */
        return false;
    }

    if ((req->cmd->nvme_cmd.opc != SPDK_NVME_OPC_WRITE) &&
        (req->cmd->nvme_cmd.opc != SPDK_NVME_OPC_READ)) {
        /* Not a READ or WRITE command */
        return false;
    }

    if (req->cmd->nvme_cmd.fuse != SPDK_NVME_CMD_FUSE_NONE) {
        /* Fused commands don't use zcopy buffers */
        return false;
    }

    ns = _nvmf_subsystem_get_ns(req->qpair->ctrlr->subsys, req->cmd->nvme_cmd.nsid);
    if (ns == NULL || ns->bdev == NULL || !ns->zcopy) {
        return false;
    }

    req->zcopy_phase = NVMF_ZCOPY_PHASE_INIT;
    return true;
}

/* If this function returns a non-zero value the request
 * reverts to using SPDK buffers
 */
int
spdk_nvmf_request_zcopy_start(struct spdk_nvmf_request *req)
{
    struct spdk_nvmf_qpair *qpair = req->qpair;
    struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
    struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
    uint32_t nsid;
    struct spdk_bdev *bdev;
    struct spdk_bdev_desc *desc;
    struct spdk_io_channel *ch;
    int rc;

    if (!qpair->ctrlr) {
        goto end;
    }

    if (qpair->group->sgroups == NULL) {
        goto end;
    }

    rc = spdk_nvmf_request_get_bdev(req->cmd->nvme_cmd.nsid, req,
                                    &bdev, &desc, &ch);
    if (rc != 0) {
        goto end;
    }

    if (ch == NULL) {
        goto end;
    }

    nsid = req->cmd->nvme_cmd.nsid;
    sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
    ns_info = &sgroup->ns_info[nsid - 1];
    if (ns_info->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
        goto end;
    }

    if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
        goto end;
    }

    /* backward compatible */
    req->data = req->iov[0].iov_base;

    /* Set iovcnt to be the maximum number of
     * iovs that the ZCOPY can use
     */
    req->iovcnt = NVMF_REQ_MAX_BUFFERS;
    TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
    rc = nvmf_bdev_ctrlr_start_zcopy(bdev, desc, ch, req);
    if (rc == 0) {
        ns_info->io_outstanding++;
        return 0;
    }
    TAILQ_REMOVE(&qpair->outstanding, req, link);

end:
    /* An error occurred, the subsystem is paused, or the qpair is not active.
     * Revert to using SPDK buffers
     */
    req->zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
    return -1;
}

int
spdk_nvmf_request_zcopy_end(struct spdk_nvmf_request *req)
{
    req->zcopy_phase = NVMF_ZCOPY_PHASE_END_PENDING;
    return nvmf_bdev_ctrlr_end_zcopy(req);
}

int
nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
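The phase transitions driven by the functions above are INIT to EXECUTE on a successful start (INIT_FAILED or back to NONE on failure) and EXECUTE to END_PENDING to COMPLETE around spdk_nvmf_request_zcopy_end(). A purely illustrative helper, not part of the diff, that can be handy when tracing those transitions (assumes the enum above is available from spdk/nvmf_transport.h):

#include "spdk/nvmf_transport.h"

static const char *
nvmf_zcopy_phase_name(enum spdk_nvmf_zcopy_phase phase)
{
    switch (phase) {
    case NVMF_ZCOPY_PHASE_NONE:        return "NONE";
    case NVMF_ZCOPY_PHASE_INIT:        return "INIT";
    case NVMF_ZCOPY_PHASE_EXECUTE:     return "EXECUTE";
    case NVMF_ZCOPY_PHASE_END_PENDING: return "END_PENDING";
    case NVMF_ZCOPY_PHASE_COMPLETE:    return "COMPLETE";
    case NVMF_ZCOPY_PHASE_INIT_FAILED: return "INIT_FAILED";
    default:                           return "UNKNOWN";
    }
}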
@@ -3691,7 +3806,30 @@ _nvmf_request_complete(void *ctx)
        spdk_nvme_print_completion(qpair->qid, rsp);
    }

-   TAILQ_REMOVE(&qpair->outstanding, req, link);
+   switch (req->zcopy_phase) {
+   case NVMF_ZCOPY_PHASE_NONE:
+       TAILQ_REMOVE(&qpair->outstanding, req, link);
+       break;
+   case NVMF_ZCOPY_PHASE_INIT:
+       if (spdk_unlikely(spdk_nvme_cpl_is_error(rsp))) {
+           /* The START failed or was aborted so revert to a normal IO */
+           req->zcopy_phase = NVMF_ZCOPY_PHASE_INIT_FAILED;
+           TAILQ_REMOVE(&qpair->outstanding, req, link);
+       } else {
+           req->zcopy_phase = NVMF_ZCOPY_PHASE_EXECUTE;
+       }
+       break;
+   case NVMF_ZCOPY_PHASE_EXECUTE:
+       break;
+   case NVMF_ZCOPY_PHASE_END_PENDING:
+       TAILQ_REMOVE(&qpair->outstanding, req, link);
+       req->zcopy_phase = NVMF_ZCOPY_PHASE_COMPLETE;
+       break;
+   default:
+       SPDK_ERRLOG("Invalid ZCOPY phase %u\n", req->zcopy_phase);
+       break;
+   }

    if (nvmf_transport_req_complete(req)) {
        SPDK_ERRLOG("Transport request completion error!\n");
    }
@@ -3703,9 +3841,14 @@ _nvmf_request_complete(void *ctx)
        assert(sgroup->mgmt_io_outstanding > 0);
        sgroup->mgmt_io_outstanding--;
    } else {
-       /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
-       if (spdk_likely(nsid - 1 < sgroup->num_ns)) {
-           sgroup->ns_info[nsid - 1].io_outstanding--;
+       if ((req->zcopy_phase == NVMF_ZCOPY_PHASE_NONE) ||
+           (req->zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE)) {
+           /* End of request */
+
+           /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
+           if (spdk_likely(nsid - 1 < sgroup->num_ns)) {
+               sgroup->ns_info[nsid - 1].io_outstanding--;
+           }
+       }
    }

@@ -3767,7 +3910,7 @@ spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req)
    sgroup->mgmt_io_outstanding++;

    /* Place the request on the outstanding list so we can keep track of it */
-   TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
+   nvmf_add_to_outstanding_queue(req);

    assert(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC);
    status = nvmf_ctrlr_process_fabrics_cmd(req);
@@ -3777,13 +3920,11 @@ spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req)
    }
}

-void
-spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
+static bool nvmf_check_subsystem_active(struct spdk_nvmf_request *req)
{
    struct spdk_nvmf_qpair *qpair = req->qpair;
    struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
    struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
-   enum spdk_nvmf_request_exec_status status;
    uint32_t nsid;

    if (qpair->ctrlr) {
@@ -3800,7 +3941,7 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
        if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
            /* The subsystem is not currently active. Queue this request. */
            TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
-           return;
+           return false;
        }
        sgroup->mgmt_io_outstanding++;
    } else {
@@ -3811,9 +3952,9 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
            req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
            req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
            req->rsp->nvme_cpl.status.dnr = 1;
-           TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
+           nvmf_add_to_outstanding_queue(req);
            _nvmf_request_complete(req);
-           return;
+           return false;
        }

        ns_info = &sgroup->ns_info[nsid - 1];
@@ -3825,27 +3966,43 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
            req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
            req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
            req->rsp->nvme_cpl.status.dnr = 1;
-           TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
+           nvmf_add_to_outstanding_queue(req);
            ns_info->io_outstanding++;
            _nvmf_request_complete(req);
-           return;
+           return false;
        }

        if (ns_info->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
            /* The namespace is not currently active. Queue this request. */
            TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
-           return;
+           return false;
        }

        ns_info->io_outstanding++;
    }

-   if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
-       req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
-       req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
-       TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
-       _nvmf_request_complete(req);
-       return;
-   }
+   if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
+       req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+       req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+       nvmf_add_to_outstanding_queue(req);
+       _nvmf_request_complete(req);
+       return false;
+   }
+
+   return true;
+}

void
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
    struct spdk_nvmf_qpair *qpair = req->qpair;
    enum spdk_nvmf_request_exec_status status;

    if (!spdk_nvmf_using_zcopy(req->zcopy_phase)) {
        if (!nvmf_check_subsystem_active(req)) {
            return;
        }
    }

    if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
@@ -3853,7 +4010,7 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
    }

    /* Place the request on the outstanding list so we can keep track of it */
-   TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
+   nvmf_add_to_outstanding_queue(req);

    if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
        status = nvmf_ctrlr_process_fabrics_cmd(req);

@@ -267,6 +267,12 @@ nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
    req->qpair->group->stat.pending_bdev_io++;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
    return spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
}

int
nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
                         struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
@@ -296,6 +302,13 @@ nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
    }

    if (req->zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE) {
        /* Return here after checking the lba etc */
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
    }

    assert(!spdk_nvmf_using_zcopy(req->zcopy_phase));

    rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
                                nvmf_bdev_ctrlr_complete_cmd, req);
    if (spdk_unlikely(rc)) {
@@ -340,6 +353,13 @@ nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
    }

    if (req->zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE) {
        /* Return here after checking the lba etc */
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
    }

    assert(!spdk_nvmf_using_zcopy(req->zcopy_phase));

    rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
                                 nvmf_bdev_ctrlr_complete_cmd, req);
    if (spdk_unlikely(rc)) {
@@ -769,3 +789,100 @@ nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,

    return (rc == 0) ? true : false;
}

static void
nvmf_bdev_ctrlr_start_zcopy_complete(struct spdk_bdev_io *bdev_io, bool success,
                                     void *cb_arg)
{
    struct spdk_nvmf_request *req = cb_arg;
    struct iovec *iov;
    int iovcnt;

    if (spdk_unlikely(!success)) {
        int sc = 0, sct = 0;
        uint32_t cdw0 = 0;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
        spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

        response->cdw0 = cdw0;
        response->status.sc = sc;
        response->status.sct = sct;

        spdk_bdev_free_io(bdev_io);
        spdk_nvmf_request_complete(req);
        return;
    }

    spdk_bdev_io_get_iovec(bdev_io, &iov, &iovcnt);

    assert(iovcnt <= NVMF_REQ_MAX_BUFFERS);

    req->iovcnt = iovcnt;

    assert(req->iov == iov);

    req->zcopy_bdev_io = bdev_io; /* Preserve the bdev_io for the end zcopy */

    spdk_nvmf_request_complete(req);
    /* Don't free the bdev_io here as it is needed for the END ZCOPY */
}

int
nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
                            struct spdk_bdev_desc *desc,
                            struct spdk_io_channel *ch,
                            struct spdk_nvmf_request *req)
{
    uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
    uint32_t block_size = spdk_bdev_get_block_size(bdev);
    uint64_t start_lba;
    uint64_t num_blocks;

    nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);

    if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
        SPDK_ERRLOG("end of media\n");
        return -ENXIO;
    }

    if (spdk_unlikely(num_blocks * block_size > req->length)) {
        SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
                    num_blocks, block_size, req->length);
        return -ENXIO;
    }

    bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;

    return spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
                                 num_blocks, populate, nvmf_bdev_ctrlr_start_zcopy_complete, req);
}

static void
nvmf_bdev_ctrlr_end_zcopy_complete(struct spdk_bdev_io *bdev_io, bool success,
                                   void *cb_arg)
{
    struct spdk_nvmf_request *req = cb_arg;

    if (spdk_unlikely(!success)) {
        int sc = 0, sct = 0;
        uint32_t cdw0 = 0;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
        spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

        response->cdw0 = cdw0;
        response->status.sc = sc;
        response->status.sct = sct;
    }

    spdk_bdev_free_io(bdev_io);
    req->zcopy_bdev_io = NULL;
    spdk_nvmf_request_complete(req);
}

int
nvmf_bdev_ctrlr_end_zcopy(struct spdk_nvmf_request *req)
{
    bool commit = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) ? true : false;

    return spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_end_zcopy_complete, req);
}
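The two functions above encode the READ/WRITE asymmetry in the populate and commit flags passed to the bdev layer. An illustrative restatement of that choice (these helpers are not part of the diff and are only meant to make the asymmetry explicit):

#include "spdk/nvme_spec.h"

/* READ: the bdev fills the buffers at start time, nothing to commit at end. */
static inline bool
zcopy_start_should_populate(const struct spdk_nvme_cmd *cmd)
{
    return cmd->opc == SPDK_NVME_OPC_READ;
}

/* WRITE: start hands out empty buffers; the data is committed to media at end. */
static inline bool
zcopy_end_should_commit(const struct spdk_nvme_cmd *cmd)
{
    return cmd->opc == SPDK_NVME_OPC_WRITE;
}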
@@ -1597,6 +1597,7 @@ nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
    /* Release all queued requests */
    TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
        TAILQ_REMOVE(&sgroup->queued, req, link);
        assert(req->zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
        spdk_nvmf_request_exec(req);
    }
fini:
@@ -190,6 +190,8 @@ struct spdk_nvmf_ns {
    char *ptpl_file;
    /* Persist Through Power Loss feature is enabled */
    bool ptpl_activated;
    /* ZCOPY supported on bdev device */
    bool zcopy;
};

struct spdk_nvmf_ctrlr_feat {
@@ -343,6 +345,7 @@ int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
                                 bool dif_insert_or_strip);
@@ -364,6 +367,7 @@ int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_de
                                     struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
                                 struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
                             struct spdk_nvmf_ctrlr *ctrlr);
@@ -442,4 +446,30 @@ nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
    return qpair->qid == 0;
}

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return 0 upon success
 * \return <0 if the zcopy operation could not be started
 */
int nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
                                struct spdk_bdev_desc *desc,
                                struct spdk_io_channel *ch,
                                struct spdk_nvmf_request *req);

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 *
 * \return 0 upon success
 * \return <0 on error
 */
int nvmf_bdev_ctrlr_end_zcopy(struct spdk_nvmf_request *req);

#endif /* __NVMF_INTERNAL_H__ */
@@ -110,6 +110,8 @@
    spdk_nvmf_request_exec;
    spdk_nvmf_request_free;
    spdk_nvmf_request_complete;
    spdk_nvmf_request_zcopy_start;
    spdk_nvmf_request_zcopy_end;
    spdk_nvmf_ctrlr_get_subsystem;
    spdk_nvmf_ctrlr_get_id;
    spdk_nvmf_req_get_xfer;
@@ -1456,6 +1456,9 @@ spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char
        return 0;
    }

    /* Cache the zcopy capability of the bdev device */
    ns->zcopy = spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZCOPY);

    if (spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) {
        opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
    }
@@ -45,11 +45,17 @@ SPDK_LOG_REGISTER_COMPONENT(nvmf)
struct spdk_bdev {
    int ut_mock;
    uint64_t blockcnt;
    uint32_t blocklen;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
        0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
            struct spdk_nvmf_subsystem *,
            (struct spdk_nvmf_tgt *tgt, const char *subnqn),
@@ -250,6 +256,49 @@ spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
    return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
    return true;
}

int
nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
                            struct spdk_bdev_desc *desc,
                            struct spdk_io_channel *ch,
                            struct spdk_nvmf_request *req)
{
    uint64_t start_lba;
    uint64_t num_blocks;

    start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
    num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

    if ((start_lba + num_blocks) > bdev->blockcnt) {
        return -ENXIO;
    }

    if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
        req->zcopy_bdev_io = zcopy_start_bdev_io_write;
    } else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
        req->zcopy_bdev_io = zcopy_start_bdev_io_read;
    } else {
        req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
    }

    spdk_nvmf_request_complete(req);
    return 0;
}

int
nvmf_bdev_ctrlr_end_zcopy(struct spdk_nvmf_request *req)
{
    req->zcopy_bdev_io = NULL;
    spdk_nvmf_request_complete(req);
    return 0;
}

static void
test_get_log_page(void)
{
@@ -2123,6 +2172,378 @@ test_nvmf_ctrlr_create_destruct(void)
    CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
}

static void
test_nvmf_ctrlr_use_zcopy(void)
{
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_request req = {};
    struct spdk_nvmf_qpair qpair = {};
    struct spdk_nvmf_ctrlr ctrlr = {};
    union nvmf_h2c_msg cmd = {};
    struct spdk_nvmf_ns ns = {};
    struct spdk_nvmf_ns *subsys_ns[1] = {};
    struct spdk_bdev bdev = {};
    struct spdk_nvmf_poll_group group = {};
    struct spdk_nvmf_subsystem_poll_group sgroups = {};
    struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
    struct spdk_io_channel io_ch = {};
    int opc;

    subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
    ns.bdev = &bdev;

    subsystem.id = 0;
    subsystem.max_nsid = 1;
    subsys_ns[0] = &ns;
    subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

    ctrlr.subsys = &subsystem;

    qpair.ctrlr = &ctrlr;
    qpair.group = &group;
    qpair.qid = 1;
    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

    group.thread = spdk_get_thread();
    group.num_sgroups = 1;
    sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    sgroups.num_ns = 1;
    ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    ns_info.channel = &io_ch;
    sgroups.ns_info = &ns_info;
    TAILQ_INIT(&sgroups.queued);
    group.sgroups = &sgroups;
    TAILQ_INIT(&qpair.outstanding);

    req.qpair = &qpair;
    req.cmd = &cmd;

    /* Admin queue */
    qpair.qid = 0;
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
    qpair.qid = 1;

    /* Invalid Opcodes */
    for (opc = 0; opc <= 255; opc++) {
        cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
        if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
            (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
            CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
        }
    }
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;

    /* Fused WRITE */
    cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
    cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;

    /* Non bdev */
    cmd.nvme_cmd.nsid = 4;
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
    cmd.nvme_cmd.nsid = 1;

    /* ZCOPY Not supported */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);

    /* Success */
    ns.zcopy = true;
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
}

static void
test_spdk_nvmf_request_zcopy_start(void)
{
    struct spdk_nvmf_request req = {};
    struct spdk_nvmf_qpair qpair = {};
    struct spdk_nvme_cmd cmd = {};
    union nvmf_c2h_msg rsp = {};
    struct spdk_nvmf_ctrlr ctrlr = {};
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_ns ns = {};
    struct spdk_nvmf_ns *subsys_ns[1] = {};
    struct spdk_nvmf_subsystem_listener listener = {};
    struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};

    struct spdk_nvmf_poll_group group = {};
    struct spdk_nvmf_subsystem_poll_group sgroups = {};
    struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
    struct spdk_io_channel io_ch = {};

    ns.bdev = &bdev;
    ns.zcopy = true;

    subsystem.id = 0;
    subsystem.max_nsid = 1;
    subsys_ns[0] = &ns;
    subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

    listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

    /* Enable controller */
    ctrlr.vcprop.cc.bits.en = 1;
    ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
    ctrlr.listener = &listener;

    group.thread = spdk_get_thread();
    group.num_sgroups = 1;
    sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    sgroups.num_ns = 1;
    ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    ns_info.channel = &io_ch;
    sgroups.ns_info = &ns_info;
    TAILQ_INIT(&sgroups.queued);
    group.sgroups = &sgroups;
    TAILQ_INIT(&qpair.outstanding);

    qpair.ctrlr = &ctrlr;
    qpair.group = &group;
    qpair.qid = 1;
    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

    cmd.nsid = 1;

    req.qpair = &qpair;
    req.cmd = (union nvmf_h2c_msg *)&cmd;
    req.rsp = &rsp;
    cmd.opc = SPDK_NVME_OPC_READ;

    /* Fail because no controller */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    qpair.ctrlr = NULL;
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
    qpair.ctrlr = &ctrlr;

    /* Fail because no sgroup */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    group.sgroups = NULL;
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
    group.sgroups = &sgroups;

    /* Fail because bad NSID */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    cmd.nsid = 0;
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
    cmd.nsid = 1;

    /* Fail because bad Channel */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    ns_info.channel = NULL;
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
    ns_info.channel = &io_ch;

    /* Fail because NSID is not active */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
    ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;

    /* Fail because QPair is not active */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

    /* Fail because nvmf_bdev_ctrlr_start_zcopy fails */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    cmd.cdw10 = bdev.blockcnt; /* SLBA: CDW10 and CDW11 */
    cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
    req.length = (cmd.cdw12 + 1) * bdev.blocklen;
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
    cmd.cdw10 = 0;
    cmd.cdw12 = 0;

    /* Success */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
}

static void
test_zcopy_read(void)
{
    struct spdk_nvmf_request req = {};
    struct spdk_nvmf_qpair qpair = {};
    struct spdk_nvme_cmd cmd = {};
    union nvmf_c2h_msg rsp = {};
    struct spdk_nvmf_ctrlr ctrlr = {};
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_ns ns = {};
    struct spdk_nvmf_ns *subsys_ns[1] = {};
    struct spdk_nvmf_subsystem_listener listener = {};
    struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};

    struct spdk_nvmf_poll_group group = {};
    struct spdk_nvmf_subsystem_poll_group sgroups = {};
    struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
    struct spdk_io_channel io_ch = {};

    ns.bdev = &bdev;
    ns.zcopy = true;

    subsystem.id = 0;
    subsystem.max_nsid = 1;
    subsys_ns[0] = &ns;
    subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

    listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

    /* Enable controller */
    ctrlr.vcprop.cc.bits.en = 1;
    ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
    ctrlr.listener = &listener;

    group.thread = spdk_get_thread();
    group.num_sgroups = 1;
    sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    sgroups.num_ns = 1;
    ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    ns_info.channel = &io_ch;
    sgroups.ns_info = &ns_info;
    TAILQ_INIT(&sgroups.queued);
    group.sgroups = &sgroups;
    TAILQ_INIT(&qpair.outstanding);

    qpair.ctrlr = &ctrlr;
    qpair.group = &group;
    qpair.qid = 1;
    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

    cmd.nsid = 1;

    req.qpair = &qpair;
    req.cmd = (union nvmf_h2c_msg *)&cmd;
    req.rsp = &rsp;
    cmd.opc = SPDK_NVME_OPC_READ;

    /* Prepare for zcopy */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    CU_ASSERT(qpair.outstanding.tqh_first == NULL);
    CU_ASSERT(ns_info.io_outstanding == 0);

    /* Perform the zcopy start */
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
    CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
    CU_ASSERT(qpair.outstanding.tqh_first == &req);
    CU_ASSERT(ns_info.io_outstanding == 1);

    /* Execute the request */
    spdk_nvmf_request_exec(&req);
    CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
    CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
    CU_ASSERT(qpair.outstanding.tqh_first == &req);
    CU_ASSERT(ns_info.io_outstanding == 1);

    /* Perform the zcopy end */
    spdk_nvmf_request_zcopy_end(&req);
    CU_ASSERT(req.zcopy_bdev_io == NULL);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
    CU_ASSERT(qpair.outstanding.tqh_first == NULL);
    CU_ASSERT(ns_info.io_outstanding == 0);
}

static void
test_zcopy_write(void)
{
    struct spdk_nvmf_request req = {};
    struct spdk_nvmf_qpair qpair = {};
    struct spdk_nvme_cmd cmd = {};
    union nvmf_c2h_msg rsp = {};
    struct spdk_nvmf_ctrlr ctrlr = {};
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_ns ns = {};
    struct spdk_nvmf_ns *subsys_ns[1] = {};
    struct spdk_nvmf_subsystem_listener listener = {};
    struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};

    struct spdk_nvmf_poll_group group = {};
    struct spdk_nvmf_subsystem_poll_group sgroups = {};
    struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
    struct spdk_io_channel io_ch = {};

    ns.bdev = &bdev;
    ns.zcopy = true;

    subsystem.id = 0;
    subsystem.max_nsid = 1;
    subsys_ns[0] = &ns;
    subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

    listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

    /* Enable controller */
    ctrlr.vcprop.cc.bits.en = 1;
    ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
    ctrlr.listener = &listener;

    group.thread = spdk_get_thread();
    group.num_sgroups = 1;
    sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    sgroups.num_ns = 1;
    ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    ns_info.channel = &io_ch;
    sgroups.ns_info = &ns_info;
    TAILQ_INIT(&sgroups.queued);
    group.sgroups = &sgroups;
    TAILQ_INIT(&qpair.outstanding);

    qpair.ctrlr = &ctrlr;
    qpair.group = &group;
    qpair.qid = 1;
    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

    cmd.nsid = 1;

    req.qpair = &qpair;
    req.cmd = (union nvmf_h2c_msg *)&cmd;
    req.rsp = &rsp;
    cmd.opc = SPDK_NVME_OPC_WRITE;

    /* Prepare for zcopy */
    CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
    CU_ASSERT(qpair.outstanding.tqh_first == NULL);
    CU_ASSERT(ns_info.io_outstanding == 0);

    /* Perform the zcopy start */
    CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
    CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
    CU_ASSERT(qpair.outstanding.tqh_first == &req);
    CU_ASSERT(ns_info.io_outstanding == 1);

    /* Execute the request */
    spdk_nvmf_request_exec(&req);
    CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
    CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
    CU_ASSERT(qpair.outstanding.tqh_first == &req);
    CU_ASSERT(ns_info.io_outstanding == 1);

    /* Perform the zcopy end */
    spdk_nvmf_request_zcopy_end(&req);
    CU_ASSERT(req.zcopy_bdev_io == NULL);
    CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
    CU_ASSERT(qpair.outstanding.tqh_first == NULL);
    CU_ASSERT(ns_info.io_outstanding == 0);
}

int main(int argc, char **argv)
{
    CU_pSuite suite = NULL;
@@ -2152,6 +2573,10 @@ int main(int argc, char **argv)
    CU_ADD_TEST(suite, test_multi_async_events);
    CU_ADD_TEST(suite, test_rae);
    CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
    CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
    CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
    CU_ADD_TEST(suite, test_zcopy_read);
    CU_ADD_TEST(suite, test_zcopy_write);

    allocate_threads(1);
    set_thread(0);
@@ -67,6 +67,9 @@ DEFINE_STUB(spdk_bdev_abort, int,
            (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
              (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));

uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
@@ -212,6 +215,19 @@ DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
            (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
            (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct iovec *iov, int iovcnt,
             uint64_t offset_blocks, uint64_t num_blocks,
             bool populate,
             spdk_bdev_io_completion_cb cb, void *cb_arg),
            0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
            (struct spdk_bdev_io *bdev_io, bool commit,
             spdk_bdev_io_completion_cb cb, void *cb_arg),
            0);

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
@@ -514,6 +530,86 @@ test_nvmf_bdev_ctrlr_identify_ns(void)
    CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
}

static void
test_nvmf_bdev_ctrlr_start_zcopy(void)
{
    int rc;
    struct spdk_bdev bdev = {};
    struct spdk_bdev_desc *desc = NULL;
    struct spdk_io_channel ch = {};

    struct spdk_nvmf_request write_req = {};
    union nvmf_c2h_msg write_rsp = {};

    struct spdk_nvmf_qpair qpair = {};

    struct spdk_nvme_cmd write_cmd = {};

    struct spdk_nvmf_ctrlr ctrlr = {};
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_ns ns = {};
    struct spdk_nvmf_ns *subsys_ns[1] = {};

    struct spdk_nvmf_poll_group group = {};
    struct spdk_nvmf_subsystem_poll_group sgroups = {};
    struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

    bdev.blocklen = 512;
    bdev.blockcnt = 10;
    ns.bdev = &bdev;

    subsystem.id = 0;
    subsystem.max_nsid = 1;
    subsys_ns[0] = &ns;
    subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

    /* Enable controller */
    ctrlr.vcprop.cc.bits.en = 1;
    ctrlr.subsys = &subsystem;

    group.num_sgroups = 1;
    sgroups.num_ns = 1;
    sgroups.ns_info = &ns_info;
    group.sgroups = &sgroups;

    qpair.ctrlr = &ctrlr;
    qpair.group = &group;

    write_req.qpair = &qpair;
    write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
    write_req.rsp = &write_rsp;

    write_cmd.nsid = 1;
    write_cmd.opc = SPDK_NVME_OPC_WRITE;

    /* 1. SUCCESS */
    write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
    write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
    write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

    rc = nvmf_bdev_ctrlr_start_zcopy(&bdev, desc, &ch, &write_req);

    CU_ASSERT(rc == 0);

    /* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
    write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
    write_cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
    write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

    rc = nvmf_bdev_ctrlr_start_zcopy(&bdev, desc, &ch, &write_req);

    CU_ASSERT(rc < 0);

    /* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
    write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
    write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
    write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

    rc = nvmf_bdev_ctrlr_start_zcopy(&bdev, desc, &ch, &write_req);

    CU_ASSERT(rc < 0);
}

int main(int argc, char **argv)
{
    CU_pSuite suite = NULL;
@@ -528,8 +624,8 @@ int main(int argc, char **argv)
    CU_ADD_TEST(suite, test_lba_in_range);
    CU_ADD_TEST(suite, test_get_dif_ctx);
    CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);

    CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
    CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_start_zcopy);

    CU_basic_set_mode(CU_BRM_VERBOSE);
    CU_basic_run_tests();
@@ -89,6 +89,9 @@ DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
             struct spdk_bdev_module *module), 0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
            (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB_V(nvmf_ctrlr_reservation_notice_log,
              (struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_ns *ns,
               enum spdk_nvme_reservation_notification_log_page_type type));
@@ -59,6 +59,10 @@ DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
            (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
            (struct spdk_bdev *bdev,
             enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_nvmf_transport_stop_listen,
            int,
            (struct spdk_nvmf_transport *transport,
@@ -186,6 +186,22 @@ DEFINE_STUB(nvmf_transport_req_complete,
            (struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
            bool,
            (struct spdk_bdev *bdev),
            false);

DEFINE_STUB(nvmf_bdev_ctrlr_start_zcopy,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_end_zcopy,
            int,
            (struct spdk_nvmf_request *req),
            0);

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
              (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
               struct spdk_nvmf_transport *transport));