bdev: Not assert but pass completion status to spdk_bdev_io_get_buf_cb
When the buffer size passed to spdk_bdev_io_get_buf() is greater than the permitted maximum, spdk_bdev_io_get_buf() simply asserts and does not call the specified callback function.

The SPDK SCSI library and the bdev perf tool do not allocate read buffers themselves; they pass the expected read buffer size and rely on spdk_bdev_io_get_buf() to allocate it.

Once DIF insert and strip are supported in the iSCSI target, the read buffer size the iSCSI initiator requests and the read buffer size the iSCSI target requests will differ. The initiator and target will negotiate correctly so that no buffer overflow occurs in spdk_bdev_io_get_buf(), but if the initiator ignores the result of that negotiation, it can request a read buffer size larger than the permitted maximum and cause a failure in the iSCSI target. This is very fragile and should be avoided.

This patch does the following:
- Add a completion status (success) parameter to spdk_bdev_io_get_buf_cb().
- When the requested size exceeds the permitted maximum, spdk_bdev_io_get_buf() calls spdk_bdev_io_get_buf_cb() with success set to false and returns.
- spdk_bdev_io_get_buf_cb() in each bdev module asserts if success is false.

Subsequent patches will handle the case where success is false in spdk_bdev_io_get_buf_cb().

Change-Id: I76429a86e18a69aa085a353ac94743296d270b82
Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-on: https://review.gerrithub.io/c/446045
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
parent 518c8add8a
commit 4b92ffb3f1
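To illustrate the new callback contract described above, here is a minimal sketch, not part of this patch, of how a bdev module could handle success == false once the follow-up patches stop asserting. example_get_buf_cb and example_submit_read are hypothetical names; spdk_bdev_io_complete() and SPDK_BDEV_IO_STATUS_FAILED are the existing SPDK completion API.

#include "spdk/stdinc.h"
#include "spdk/bdev_module.h"

/* Hypothetical forward declaration standing in for a module's existing read path. */
static void example_submit_read(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);

/* Sketch of a module callback using the new spdk_bdev_io_get_buf_cb signature.
 * Instead of assert(success == true), it fails the I/O when no buffer could be
 * allocated (e.g. the requested size exceeded the permitted maximum). */
static void
example_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	/* Buffer is available; continue submitting the read as before. */
	example_submit_read(ch, bdev_io);
}

Within this patch itself every module still asserts; the sketch only indicates the direction the subsequent patches take.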
@@ -396,7 +396,17 @@ struct spdk_bdev {
 	} internal;
 };
 
-typedef void (*spdk_bdev_io_get_buf_cb)(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
+/**
+ * Callback when buffer is allocated for the bdev I/O.
+ *
+ * \param ch The I/O channel the bdev I/O was handled on.
+ * \param bdev_io The bdev I/O
+ * \param success True if buffer is allocated successfully or the bdev I/O has an SGL
+ * assigned already, or false if it failed. The possible reason of failure is the size
+ * of the buffer to allocate is greater than the permitted maximum.
+ */
+typedef void (*spdk_bdev_io_get_buf_cb)(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+					bool success);
 
 #define BDEV_IO_NUM_CHILD_IOV 32
 
@@ -394,8 +394,12 @@ bdev_aio_reset(struct file_disk *fdisk, struct bdev_aio_task *aio_task)
 	bdev_aio_reset_retry_timer(fdisk);
 }
 
-static void bdev_aio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+static void
+bdev_aio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		    bool success)
 {
+	assert(success == true);
+
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
 		bdev_aio_readv((struct file_disk *)bdev_io->bdev->ctxt,
@@ -549,7 +549,7 @@ spdk_bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
 
 		STAILQ_REMOVE_HEAD(stailq, internal.buf_link);
 		tmp->internal.buf = buf;
-		tmp->internal.get_buf_cb(tmp->internal.ch->channel, tmp);
+		tmp->internal.get_buf_cb(tmp->internal.ch->channel, tmp, true);
 	}
 }
 
@@ -591,11 +591,17 @@ spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, u
 	if (buf_allocated &&
 	    _are_iovs_aligned(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, alignment)) {
 		/* Buffer already present and aligned */
-		cb(bdev_io->internal.ch->channel, bdev_io);
+		cb(bdev_io->internal.ch->channel, bdev_io, true);
 		return;
 	}
 
+	if (len + alignment > SPDK_BDEV_LARGE_BUF_MAX_SIZE + SPDK_BDEV_POOL_ALIGNMENT) {
+		SPDK_ERRLOG("Length + alignment %" PRIu64 " is larger than allowed\n",
+			    len + alignment);
+		cb(bdev_io->internal.ch->channel, bdev_io, false);
+		return;
+	}
+
-	assert(len + alignment <= SPDK_BDEV_LARGE_BUF_MAX_SIZE + SPDK_BDEV_POOL_ALIGNMENT);
 	mgmt_ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
 
 	bdev_io->internal.buf_len = len;
@@ -622,7 +628,7 @@ spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, u
 			spdk_bdev_io_set_buf(bdev_io, aligned_buf, len);
 		}
 		bdev_io->internal.buf = buf;
-		bdev_io->internal.get_buf_cb(bdev_io->internal.ch->channel, bdev_io);
+		bdev_io->internal.get_buf_cb(bdev_io->internal.ch->channel, bdev_io, true);
 	}
 }
 
@@ -1647,6 +1653,15 @@ _spdk_bdev_io_split(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 	_spdk_bdev_io_split_with_payload(bdev_io);
 }
 
+static void
+_spdk_bdev_io_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+			       bool success)
+{
+	assert(success == true);
+
+	_spdk_bdev_io_split(ch, bdev_io);
+}
+
 /* Explicitly mark this inline, since it's used as a function pointer and otherwise won't
  * be inlined, at least on some compilers.
  */
@@ -1699,7 +1714,7 @@ spdk_bdev_io_submit(struct spdk_bdev_io *bdev_io)
 
 	if (bdev->split_on_optimal_io_boundary && _spdk_bdev_io_should_split(bdev_io)) {
 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
-			spdk_bdev_io_get_buf(bdev_io, _spdk_bdev_io_split,
+			spdk_bdev_io_get_buf(bdev_io, _spdk_bdev_io_split_get_buf_cb,
 					     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 		} else {
 			_spdk_bdev_io_split(NULL, bdev_io);
@@ -905,13 +905,16 @@ _complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
  * beneath us before we're done with it.
  */
 static void
-crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		       bool success)
 {
 	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
 					   crypto_bdev);
 	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
 	int rc;
 
+	assert(success == true);
+
 	rc = spdk_bdev_readv_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, bdev_io->u.bdev.iovs,
 				    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
 				    bdev_io->u.bdev.num_blocks, _complete_internal_read,
@@ -312,8 +312,11 @@ bdev_ftl_writev(struct ftl_bdev *ftl_bdev, struct spdk_io_channel *ch,
 }
 
 static void
-bdev_ftl_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+bdev_ftl_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		    bool success)
 {
+	assert(success == true);
+
 	int rc = bdev_ftl_readv((struct ftl_bdev *)bdev_io->bdev->ctxt,
 				ch, (struct ftl_bdev_io *)bdev_io->driver_ctx);
 
@@ -396,8 +396,12 @@ bdev_iscsi_no_master_ch_poll(void *arg)
 	return rc;
 }
 
-static void bdev_iscsi_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+static void
+bdev_iscsi_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		      bool success)
 {
+	assert(success == true);
+
 	bdev_iscsi_readv((struct bdev_iscsi_lun *)bdev_io->bdev->ctxt,
 			 (struct bdev_iscsi_io *)bdev_io->driver_ctx,
 			 bdev_io->u.bdev.iovs,
@@ -840,6 +840,14 @@ lvol_reset(struct spdk_bdev_io *bdev_io)
 	return 0;
 }
 
+static void
+lvol_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
+{
+	assert(success == true);
+
+	lvol_read(ch, bdev_io);
+}
+
 static void
 vbdev_lvol_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
@@ -849,7 +857,7 @@ vbdev_lvol_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_
 
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
-		spdk_bdev_io_get_buf(bdev_io, lvol_read,
+		spdk_bdev_io_get_buf(bdev_io, lvol_get_buf_cb,
 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 		break;
 	case SPDK_BDEV_IO_TYPE_WRITE:
@@ -374,10 +374,13 @@ bdev_nvme_unmap(struct nvme_bdev *nbdev, struct spdk_io_channel *ch,
 		uint64_t num_blocks);
 
 static void
-bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		     bool success)
 {
 	int ret;
 
+	assert(success == true);
+
 	ret = bdev_nvme_readv((struct nvme_bdev *)bdev_io->bdev->ctxt,
 			      ch,
 			      (struct nvme_bdev_io *)bdev_io->driver_ctx,
@@ -341,6 +341,15 @@ fail:
 	}
 }
 
+static void
+vbdev_ocf_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		     bool success)
+{
+	assert(success == true);
+
+	io_handle(ch, bdev_io);
+}
+
 /* Called from bdev layer when an io to Cache vbdev is submitted */
 static void
 vbdev_ocf_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
@@ -349,7 +358,7 @@ vbdev_ocf_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
 	case SPDK_BDEV_IO_TYPE_READ:
 		/* User does not have to allocate io vectors for the request,
 		 * so in case they are not allocated, we allocate them here */
-		spdk_bdev_io_get_buf(bdev_io, io_handle,
+		spdk_bdev_io_get_buf(bdev_io, vbdev_ocf_get_buf_cb,
 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 		break;
 	case SPDK_BDEV_IO_TYPE_WRITE:
@@ -211,12 +211,14 @@ vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
  * if this example were used as a template for something more complex.
  */
 static void
-pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
 {
 	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
 					 pt_bdev);
 	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
 
+	assert(success == true);
+
 	spdk_bdev_readv_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
 			       bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
 			       bdev_io->u.bdev.num_blocks, _pt_complete_io,
@@ -184,8 +184,11 @@ bdev_pmem_write_zeros(struct spdk_bdev_io *bdev_io, struct pmem_disk *pdisk,
 }
 
 static void
-bdev_pmem_io_get_buf_cb(struct spdk_io_channel *channel, struct spdk_bdev_io *bdev_io)
+bdev_pmem_io_get_buf_cb(struct spdk_io_channel *channel, struct spdk_bdev_io *bdev_io,
+			bool success)
 {
+	assert(success == true);
+
 	bdev_pmem_submit_io(bdev_io,
 			    bdev_io->bdev->ctxt,
 			    channel,
@@ -564,6 +564,25 @@ _raid_bdev_submit_reset_request(struct spdk_io_channel *ch, struct spdk_bdev_io
 	_raid_bdev_submit_reset_request_next(bdev_io);
 }
 
+/*
+ * brief:
+ * Callback function to spdk_bdev_io_get_buf.
+ * params:
+ * ch - pointer to raid bdev io channel
+ * bdev_io - pointer to parent bdev_io on raid bdev device
+ * success - True if buffer is allocated or false otherwise.
+ * returns:
+ * none
+ */
+static void
+raid_bdev_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		     bool success)
+{
+	assert(success == true);
+
+	raid_bdev_start_rw_request(ch, bdev_io);
+}
+
 /*
  * brief:
  * raid_bdev_submit_request function is the submit_request function pointer of
@@ -581,7 +600,7 @@ raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
 		if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
-			spdk_bdev_io_get_buf(bdev_io, raid_bdev_start_rw_request,
+			spdk_bdev_io_get_buf(bdev_io, raid_bdev_get_buf_cb,
 					     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 		} else {
 			/* Just call it directly if iov_base is already populated. */
@@ -373,10 +373,14 @@ bdev_rbd_destruct(void *ctx)
 	return 0;
 }
 
-static void bdev_rbd_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+static void
+bdev_rbd_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		    bool success)
 {
 	int ret;
 
+	assert(success == true);
+
 	ret = bdev_rbd_rw(bdev_io->bdev->ctxt,
 			  ch,
 			  bdev_io,
@@ -164,7 +164,6 @@ bdev_virtio_command(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 	struct virtio_blk_outhdr *req = &io_ctx->req;
 	struct virtio_blk_discard_write_zeroes *desc = &io_ctx->unmap;
 
-
 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
 		req->type = VIRTIO_BLK_T_IN;
 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
@@ -184,6 +183,15 @@ bdev_virtio_command(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 	bdev_virtio_blk_send_io(ch, bdev_io);
 }
 
+static void
+bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		       bool success)
+{
+	assert(success == true);
+
+	bdev_virtio_command(ch, bdev_io);
+}
+
 static int
 _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
@@ -191,7 +199,7 @@ _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bde
 
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
-		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_command,
+		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 		return 0;
 	case SPDK_BDEV_IO_TYPE_WRITE:
@@ -578,7 +578,7 @@ bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 }
 
 static void
-bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
 {
 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
 	struct virtio_scsi_cmd_req *req = &io_ctx->req;
@@ -587,6 +587,8 @@ bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 	uint64_t offset_blocks, num_blocks;
 	uint16_t cmd_len;
 
+	assert(success == true);
+
 	buf = bdev_io->u.bdev.iovs[0].iov_base;
 
 	offset_blocks = bdev_io->u.bdev.offset_blocks;
@@ -622,13 +624,22 @@ bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 	bdev_virtio_send_io(ch, bdev_io);
 }
 
+static void
+bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+		       bool success)
+{
+	assert(success == true);
+
+	bdev_virtio_rw(ch, bdev_io);
+}
+
 static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
 
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
-		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_rw,
+		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 		return 0;
 	case SPDK_BDEV_IO_TYPE_WRITE:
@@ -179,10 +179,19 @@ stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
 	free(expected_io);
 }
 
+static void
+stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
+				      struct spdk_bdev_io *bdev_io, bool success)
+{
+	CU_ASSERT(success == true);
+
+	stub_submit_request(_ch, bdev_io);
+}
+
 static void
 stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
 {
-	spdk_bdev_io_get_buf(bdev_io, stub_submit_request,
+	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
 			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 }
 
@@ -231,7 +231,7 @@ rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
 void
 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
 {
-	cb(g_io_ch, g_bdev_io);
+	cb(g_io_ch, g_bdev_io, true);
 }
 
 /* Mock these functions to call the callback and then return the value we require */
@@ -151,7 +151,7 @@ pmemblk_open(const char *path, size_t bsize)
 void
 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
 {
-	cb(NULL, bdev_io);
+	cb(NULL, bdev_io, true);
 }
 
 static void
@@ -479,7 +479,7 @@ struct spdk_io_channel *spdk_lvol_get_io_channel(struct spdk_lvol *lvol)
 void
 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
 {
-	CU_ASSERT(cb == lvol_read);
+	CU_ASSERT(cb == lvol_get_buf_cb);
 }
 
 void