nvmf: return async/complete status in bdev zcopy operations

Additionally, the NVMe completion status is now updated and the IOs are
queued if the bdev layer doesn't have enough IO descriptors.  It makes
the zcopy operations behave similarly to the other IO operations.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I455ae781e32aa6e60d144d2c91f109bd8be46664
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10787
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Konrad Sztyber 2021-12-06 14:22:16 +01:00 committed by Jim Harris
parent 0e09df57dd
commit 686b9984b3
3 changed files with 46 additions and 12 deletions

View File

@@ -838,28 +838,45 @@ nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
struct spdk_io_channel *ch,
struct spdk_nvmf_request *req)
{
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
uint32_t block_size = spdk_bdev_get_block_size(bdev);
uint64_t start_lba;
uint64_t num_blocks;
int rc;
nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);
if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
SPDK_ERRLOG("end of media\n");
return -ENXIO;
rsp->status.sct = SPDK_NVME_SCT_GENERIC;
rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
if (spdk_unlikely(num_blocks * block_size > req->length)) {
SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
num_blocks, block_size, req->length);
return -ENXIO;
rsp->status.sct = SPDK_NVME_SCT_GENERIC;
rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;
return spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
if (spdk_unlikely(rc != 0)) {
if (rc == -ENOMEM) {
nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
rsp->status.sct = SPDK_NVME_SCT_GENERIC;
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
static void
@@ -887,5 +904,12 @@ nvmf_bdev_ctrlr_zcopy_end_complete(struct spdk_bdev_io *bdev_io, bool success,
int
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
return spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);
int rc __attribute__((unused));
rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);
/* The only way spdk_bdev_zcopy_end() can fail is if we pass a bdev_io type that isn't ZCOPY */
assert(rc == 0);
return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

View File

@@ -483,8 +483,10 @@ nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
* \param ch The \ref spdk_io_channel
* \param req The \ref spdk_nvmf_request passed to the bdev for processing
*
* \return 0 upon success
* \return <0 if the zcopy operation could not be started
* \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
* SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
* completed asynchronously. Asynchronous completions are notified through
* spdk_nvmf_request_complete().
*/
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
struct spdk_bdev_desc *desc,
@@ -497,8 +499,10 @@ int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
* \param req The NVMe-oF request
* \param commit Flag indicating whether the buffers should be committed
*
* \return 0 upon success
* \return <0 on error
* \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
* SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
* completed asynchronously. Asynchronous completions are notified through
* spdk_nvmf_request_complete().
*/
int nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);

View File

@@ -606,7 +606,9 @@ test_nvmf_bdev_ctrlr_zcopy_start(void)
rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);
CU_ASSERT(rc == 0);
CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);
/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
@@ -615,7 +617,9 @@ test_nvmf_bdev_ctrlr_zcopy_start(void)
rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);
CU_ASSERT(rc < 0);
CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);
/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
@@ -624,7 +628,9 @@ test_nvmf_bdev_ctrlr_zcopy_start(void)
rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);
CU_ASSERT(rc < 0);
CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}
static void