nvmf: fix segfault in case of multi-range unmap

In case of a DSM Deallocate (unmap) command with multiple ranges, an
individual bdev IO is submitted for each range. If a bdev IO cannot be
allocated, the request is queued on io_wait_queue; however, previously
submitted ranges may complete before memory becomes available for the
next range. In that case the completion callback frees unmap_ctx while
the request is still queued waiting for memory, causing a segfault when
the request is dequeued. Fix this by introducing a new field
(range_index) to track which ranges have been submitted, and by keeping
the count nonzero while the request is queued for memory.

Signed-off-by: Yair Elharrar <yair@excelero.com>
Change-Id: Ifcac018f14af5ca408c7793ca9543c1e2d63b777
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/447542
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent e051a5366e
commit 2b0ae30bf1
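The failure mode is easiest to see as a reference-counting bug. Below is a minimal, self-contained model of the pattern, not SPDK code: every name in it (struct ctx, complete_one, the old/new switch) is invented for illustration, and free() is simulated with a flag so the use-after-free prints instead of crashing.

/*
 * model.c - minimal model of the refcount bug fixed by this commit.
 * Build and run:  cc -o model model.c && ./model old && ./model new
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx {
	unsigned count;       /* in-flight ranges; with the fix, +1 while queued */
	unsigned range_index; /* next range to submit (the new field) */
	int freed;            /* stands in for free(ctx) */
};

/* Completion of one in-flight range: drop a reference, "free" at zero. */
static void complete_one(struct ctx *c)
{
	if (--c->count == 0) {
		printf("count hit 0 -> ctx freed\n");
		c->freed = 1;
	}
}

int main(int argc, char **argv)
{
	int fixed = (argc > 1 && strcmp(argv[1], "new") == 0);
	struct ctx c = {0};

	c.count++; c.range_index++;   /* range 0 submitted to the bdev */

	c.count++;                    /* range 1: allocation fails (-ENOMEM), */
	if (!fixed) {                 /* so the request is queued for memory  */
		c.count--;            /* old scheme: reference dropped now    */
	}                             /* new scheme: held until dequeued      */

	complete_one(&c);             /* range 0 completes while still queued */

	/* Memory is available again; the queued request is dequeued. */
	if (c.freed) {
		printf("dequeue touches freed ctx -> use-after-free (segfault)\n");
		return 1;
	}
	if (fixed) {
		c.count--;            /* "dequeued": release the queued ref   */
	}
	c.count++; c.range_index++;   /* resume at range_index, submit range 1 */
	complete_one(&c);             /* range 1 completes; freed exactly once */
	return 0;
}

Running with "old" reproduces the pre-fix sequence; with "new", the extra reference held while queued keeps the context alive until the request is dequeued and resubmitted.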
@@ -366,6 +366,7 @@ struct nvmf_bdev_ctrlr_unmap {
 	struct spdk_bdev_desc		*desc;
 	struct spdk_bdev		*bdev;
 	struct spdk_io_channel		*ch;
+	uint32_t			range_index;
 };
 
 static void
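For context, the fields this hunk does not show (req and count, both referenced later in the diff) make the whole structure look roughly like the sketch below; the ordering and comments are my reading of the patch, with forward declarations standing in for the SPDK headers.

#include <stdint.h>

/* Forward declarations stand in for the real SPDK headers. */
struct spdk_nvmf_request;
struct spdk_bdev_desc;
struct spdk_bdev;
struct spdk_io_channel;

struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;  /* the DSM request being served */
	uint32_t count;                 /* outstanding unmaps; after the fix it
	                                 * also counts the request while it is
	                                 * queued for memory */
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t range_index;           /* new: next DSM range to submit */
};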
@@ -439,13 +440,16 @@ nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
 		unmap_ctx->req = req;
 		unmap_ctx->desc = desc;
 		unmap_ctx->ch = ch;
+		unmap_ctx->bdev = bdev;
+
+		response->status.sct = SPDK_NVME_SCT_GENERIC;
+		response->status.sc = SPDK_NVME_SC_SUCCESS;
+	} else {
+		unmap_ctx->count--;	/* dequeued */
 	}
 
-	response->status.sct = SPDK_NVME_SCT_GENERIC;
-	response->status.sc = SPDK_NVME_SC_SUCCESS;
-
 	dsm_range = (struct spdk_nvme_dsm_range *)req->data;
-	for (i = unmap_ctx->count; i < nr; i++) {
+	for (i = unmap_ctx->range_index; i < nr; i++) {
 		lba = dsm_range[i].starting_lba;
 		lba_count = dsm_range[i].length;
 
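The new else branch runs when the request is dequeued from io_wait_queue and resubmitted. The resubmit callback itself is outside this diff; judging from the call to nvmf_bdev_ctrl_queue_io in the next hunk, it presumably just re-enters nvmf_bdev_ctrlr_unmap with the saved context, along the lines of the sketch below (the argument order is assumed from the hunk header).

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;

	/* Re-enter the submit loop; the non-NULL ctx selects the else branch
	 * above, which releases the reference held while queued, and the loop
	 * resumes from unmap_ctx->range_index. */
	nvmf_bdev_ctrlr_unmap(unmap_ctx->bdev, unmap_ctx->desc, unmap_ctx->ch,
			      unmap_ctx->req, unmap_ctx);
}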
@@ -457,7 +461,7 @@ nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
 			if (rc == -ENOMEM) {
 				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
 				/* Unmap was not yet submitted to bdev */
-				unmap_ctx->count--;
+				/* unmap_ctx->count will be decremented when the request is dequeued */
 				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
 			}
 			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
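This hunk is the core of the fix: the decrement has to move because the per-range completion callback (not shown in this diff) frees unmap_ctx once count drops to zero, so a queued request that holds no reference can be left pointing at freed memory. A sketch of that callback's shape follows; the function name, the status bookkeeping, and the completion helpers are assumptions, not taken from this diff.

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;

	/* ... fold this range's status into the NVMe completion ... */

	unmap_ctx->count--;
	if (unmap_ctx->count == 0) {
		/* Last reference gone: complete the request and free the
		 * context. Pre-fix, this could run while the request was
		 * still sitting on io_wait_queue, because the queued request
		 * held no reference; the later dequeue then dereferenced
		 * freed memory. */
		spdk_nvmf_request_complete(unmap_ctx->req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}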
@@ -466,6 +470,7 @@ nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
 			 * unmaps already sent to complete */
 			break;
 		}
+		unmap_ctx->range_index++;
 	}
 
 	if (unmap_ctx->count == 0) {