nvmf: add Write Zeroes support

Change-Id: I743f5e4d1c24ad5ef9f1fef4c2678e347b179a9f
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/377260
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Daniel Verkamp 2017-09-05 17:26:14 -07:00 committed by Jim Harris
parent 5eb129647d
commit 0caab4e1da
6 changed files with 71 additions and 8 deletions

View File

@@ -55,6 +55,8 @@ AllowAnyHost is disabled by default.
NVMe-oF namespaces may now be assigned arbitrary namespace IDs, and the number NVMe-oF namespaces may now be assigned arbitrary namespace IDs, and the number
of namespaces per subsystem is no longer limited. of namespaces per subsystem is no longer limited.
The NVMe-oF target now supports the Write Zeroes command.
### Environment Abstraction Layer ### Environment Abstraction Layer
A new default value, SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, was added to provide A new default value, SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, was added to provide

View File

@@ -852,6 +852,7 @@ spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_c
cdata->nvmf_specific.ioccsz += tgt->opts.in_capsule_data_size / 16; cdata->nvmf_specific.ioccsz += tgt->opts.in_capsule_data_size / 16;
cdata->oncs.dsm = spdk_nvmf_ctrlr_dsm_supported(ctrlr); cdata->oncs.dsm = spdk_nvmf_ctrlr_dsm_supported(ctrlr);
cdata->oncs.write_zeroes = spdk_nvmf_ctrlr_write_zeroes_supported(ctrlr);
SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "ext ctrlr data: ioccsz 0x%x\n", SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "ext ctrlr data: ioccsz 0x%x\n",
cdata->nvmf_specific.ioccsz); cdata->nvmf_specific.ioccsz);

View File

@@ -48,10 +48,10 @@
#include "spdk_internal/log.h" #include "spdk_internal/log.h"
bool static bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr) spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
enum spdk_bdev_io_type io_type)
{ {
struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
struct spdk_nvmf_ns *ns; struct spdk_nvmf_ns *ns;
for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
@@ -60,20 +60,32 @@ spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
continue; continue;
} }
if (!spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_UNMAP)) { if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
SPDK_DEBUGLOG(SPDK_TRACE_NVMF, SPDK_DEBUGLOG(SPDK_TRACE_NVMF,
"Subsystem %s namespace %u (%s) does not support unmap - not enabling DSM\n", "Subsystem %s namespace %u (%s) does not support io_type %d\n",
spdk_nvmf_subsystem_get_nqn(subsystem), spdk_nvmf_subsystem_get_nqn(subsystem),
ns->id, spdk_bdev_get_name(ns->bdev)); ns->id, spdk_bdev_get_name(ns->bdev), (int)io_type);
return false; return false;
} }
} }
SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "All devices in Subsystem %s support unmap - enabling DSM\n", SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "All devices in Subsystem %s support io_type %d\n",
spdk_nvmf_subsystem_get_nqn(subsystem)); spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
return true; return true;
} }
/*
 * Report whether the DSM (Dataset Management) command can be advertised
 * for this controller: true only if every active namespace's bdev in the
 * controller's subsystem supports UNMAP.
 */
bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;

	return spdk_nvmf_subsystem_bdev_io_type_supported(subsystem, SPDK_BDEV_IO_TYPE_UNMAP);
}
/*
 * Report whether the Write Zeroes command can be advertised for this
 * controller: true only if every active namespace's bdev in the
 * controller's subsystem supports the WRITE_ZEROES bdev I/O type.
 */
bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;

	return spdk_nvmf_subsystem_bdev_io_type_supported(subsystem, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}
static void static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success, nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
void *cb_arg) void *cb_arg)
@@ -209,6 +221,36 @@ nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
} }
/*
 * Execute an NVMe Write Zeroes command by submitting a write-zeroes
 * operation to the namespace's backing bdev.
 *
 * Returns SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE with an error status in
 * the completion when the LBA range is invalid or submission fails;
 * otherwise returns SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS and the
 * request completes later via nvmf_bdev_ctrlr_complete_cmd.
 */
static int
nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t max_blocks = spdk_bdev_get_num_blocks(bdev);
	uint64_t lba;
	uint64_t lba_count;
	int rc;

	/* Write Zeroes uses the same SLBA/NLB command fields as Read/Write. */
	nvmf_bdev_ctrlr_get_rw_params(cmd, &lba, &lba_count);

	/* Reject ranges that run past the end of the bdev. */
	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(max_blocks, lba, lba_count))) {
		SPDK_ERRLOG("end of media\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* NOTE(review): this reuses the WRITE trace point for write zeroes;
	 * confirm no dedicated trace point is intended. */
	spdk_trace_record(TRACE_NVMF_LIB_WRITE_START, 0, 0, (uint64_t)req, 0);

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, lba, lba_count,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc != 0)) {
		/* Submission failed immediately; complete with a generic
		 * internal device error. */
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
static int static int
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch, struct spdk_nvmf_request *req) struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
@@ -361,6 +403,8 @@ spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req); return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req);
case SPDK_NVME_OPC_WRITE: case SPDK_NVME_OPC_WRITE:
return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req); return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req);
case SPDK_NVME_OPC_WRITE_ZEROES:
return nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req);
case SPDK_NVME_OPC_FLUSH: case SPDK_NVME_OPC_FLUSH:
return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req); return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
case SPDK_NVME_OPC_DATASET_MANAGEMENT: case SPDK_NVME_OPC_DATASET_MANAGEMENT:

View File

@@ -234,6 +234,7 @@ void spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req); int spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req); int spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr); bool spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
int spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_bdev *bdev, struct spdk_nvme_ns_data *nsdata); int spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_bdev *bdev, struct spdk_nvme_ns_data *nsdata);

View File

@@ -128,6 +128,13 @@ spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
return false; return false;
} }
/*
 * Unit-test stub: the code under test must never query Write Zeroes
 * support in these tests, so an unexpected call aborts loudly rather
 * than returning a silently wrong answer.
 */
bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	abort();
	/* Unreachable; present only to satisfy the function signature. */
	return false;
}
int int
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_bdev *bdev, struct spdk_nvme_ns_data *nsdata) spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_bdev *bdev, struct spdk_nvme_ns_data *nsdata)
{ {

View File

@@ -129,6 +129,14 @@ spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, v
return 0; return 0;
} }
/*
 * Unit-test stub: always reports successful submission (0) and never
 * invokes the completion callback, so tests exercise only the submit
 * path of the caller.
 */
int
spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			      uint64_t offset_blocks, uint64_t num_blocks,
			      spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return 0;
}
int int
spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc, spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch, struct spdk_io_channel *ch,