/*-
 *   BSD LICENSE
 *
 *   Copyright (C) 2008-2012 Daisuke Aoyama <aoyama@peach.ne.jp>.
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/io_channel.h"
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/nvme_spec.h"
#include "spdk/scsi_spec.h"
#include "spdk/util.h"

#include "spdk_internal/bdev.h"
#include "spdk_internal/log.h"
#include "spdk/string.h"

#ifdef SPDK_CONFIG_VTUNE
#include "ittnotify.h"
#include "ittnotify_types.h"
int __itt_init_ittlib(const char *, __itt_group_id);
#endif

#define SPDK_BDEV_IO_POOL_SIZE	(64 * 1024)
#define BUF_SMALL_POOL_SIZE	8192
#define BUF_LARGE_POOL_SIZE	1024
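
/*
 * Descriptive note (not in the original source): when a bdev module returns
 * SPDK_BDEV_IO_STATUS_NOMEM, the generic bdev layer queues the I/O on the
 * channel's nomem_io list and retries once io_outstanding drops back to the
 * channel's nomem_threshold. This relies on modules allocating their internal
 * resources per io_channel so completions guarantee forward progress.
 */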
#define NOMEM_THRESHOLD_COUNT 8
#define ZERO_BUFFER_SIZE	0x100000

typedef TAILQ_HEAD(, spdk_bdev_io) bdev_io_tailq_t;

struct spdk_bdev_mgr {
	struct spdk_mempool *bdev_io_pool;

	struct spdk_mempool *buf_small_pool;
	struct spdk_mempool *buf_large_pool;

	void *zero_buffer;

	TAILQ_HEAD(, spdk_bdev_module_if) bdev_modules;

	TAILQ_HEAD(, spdk_bdev) bdevs;

	bool init_complete;
	bool module_init_complete;

#ifdef SPDK_CONFIG_VTUNE
	__itt_domain	*domain;
#endif
};

static struct spdk_bdev_mgr g_bdev_mgr = {
	.bdev_modules = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdev_modules),
	.bdevs = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdevs),
	.init_complete = false,
	.module_init_complete = false,
};

static spdk_bdev_init_cb	g_init_cb_fn = NULL;
static void			*g_init_cb_arg = NULL;

static spdk_bdev_fini_cb	g_fini_cb_fn = NULL;
static void			*g_fini_cb_arg = NULL;
static struct spdk_thread	*g_fini_thread = NULL;

struct spdk_bdev_mgmt_channel {
	bdev_io_tailq_t need_buf_small;
	bdev_io_tailq_t need_buf_large;
};

struct spdk_bdev_desc {
	struct spdk_bdev		*bdev;
	spdk_bdev_remove_cb_t		remove_cb;
	void				*remove_ctx;
	bool				write;
	TAILQ_ENTRY(spdk_bdev_desc)	link;
};

#define BDEV_CH_RESET_IN_PROGRESS	(1 << 0)

struct spdk_bdev_channel {
	struct spdk_bdev	*bdev;

	/* The channel for the underlying device */
	struct spdk_io_channel	*channel;

	/* Channel for the bdev manager */
	struct spdk_io_channel	*mgmt_channel;

	struct spdk_bdev_io_stat stat;

	/*
	 * Count of I/O submitted to bdev module and waiting for completion.
	 * Incremented before submit_request() is called on an spdk_bdev_io.
	 */
	uint64_t		io_outstanding;

	bdev_io_tailq_t		queued_resets;

	/*
	 * Queue of IO awaiting retry because of a previous NOMEM status returned
	 * on this channel.
	 */
	bdev_io_tailq_t		nomem_io;

	/*
	 * Threshold which io_outstanding must drop to before retrying nomem_io.
	 */
	uint64_t		nomem_threshold;

	uint32_t		flags;

#ifdef SPDK_CONFIG_VTUNE
	uint64_t		start_tsc;
	uint64_t		interval_tsc;
	__itt_string_handle	*handle;
#endif
};

static void spdk_bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);

struct spdk_bdev *
spdk_bdev_first(void)
{
	struct spdk_bdev *bdev;

	bdev = TAILQ_FIRST(&g_bdev_mgr.bdevs);
	if (bdev) {
		SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Starting bdev iteration at %s\n", bdev->name);
	}

	return bdev;
}

struct spdk_bdev *
spdk_bdev_next(struct spdk_bdev *prev)
{
	struct spdk_bdev *bdev;

	bdev = TAILQ_NEXT(prev, link);
	if (bdev) {
		SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Continuing bdev iteration at %s\n", bdev->name);
	}

	return bdev;
}

static struct spdk_bdev *
_bdev_next_leaf(struct spdk_bdev *bdev)
{
	while (bdev != NULL) {
		if (TAILQ_EMPTY(&bdev->vbdevs)) {
			return bdev;
		} else {
			bdev = TAILQ_NEXT(bdev, link);
		}
	}

	return bdev;
}

struct spdk_bdev *
spdk_bdev_first_leaf(void)
{
	struct spdk_bdev *bdev;

	bdev = _bdev_next_leaf(TAILQ_FIRST(&g_bdev_mgr.bdevs));

	if (bdev) {
		SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Starting bdev iteration at %s\n", bdev->name);
	}

	return bdev;
}

struct spdk_bdev *
spdk_bdev_next_leaf(struct spdk_bdev *prev)
{
	struct spdk_bdev *bdev;

	bdev = _bdev_next_leaf(TAILQ_NEXT(prev, link));

	if (bdev) {
		SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Continuing bdev iteration at %s\n", bdev->name);
	}

	return bdev;
}

struct spdk_bdev *
spdk_bdev_get_by_name(const char *bdev_name)
{
	struct spdk_bdev *bdev = spdk_bdev_first();

	while (bdev != NULL) {
		if (strcmp(bdev_name, bdev->name) == 0) {
			return bdev;
		}
		bdev = spdk_bdev_next(bdev);
	}

	return NULL;
}

static void
spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf)
{
	assert(bdev_io->get_buf_cb != NULL);
	assert(buf != NULL);
	assert(bdev_io->u.bdev.iovs != NULL);

	bdev_io->buf = buf;
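	/*
	 * Align the returned address up to a 512-byte boundary; each buffer
	 * pool element is allocated with 512 bytes of slack to allow this.
	 */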
	bdev_io->u.bdev.iovs[0].iov_base = (void *)((unsigned long)((char *)buf + 512) & ~511UL);
	bdev_io->u.bdev.iovs[0].iov_len = bdev_io->buf_len;
	bdev_io->get_buf_cb(bdev_io->ch->channel, bdev_io);
}

static void
spdk_bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
{
	struct spdk_mempool *pool;
	struct spdk_bdev_io *tmp;
	void *buf;
	bdev_io_tailq_t *tailq;
	struct spdk_bdev_mgmt_channel *ch;

	assert(bdev_io->u.bdev.iovcnt == 1);

	buf = bdev_io->buf;
	ch = spdk_io_channel_get_ctx(bdev_io->ch->mgmt_channel);

	if (bdev_io->buf_len <= SPDK_BDEV_SMALL_BUF_MAX_SIZE) {
		pool = g_bdev_mgr.buf_small_pool;
		tailq = &ch->need_buf_small;
	} else {
		pool = g_bdev_mgr.buf_large_pool;
		tailq = &ch->need_buf_large;
	}

	if (TAILQ_EMPTY(tailq)) {
		spdk_mempool_put(pool, buf);
	} else {
		tmp = TAILQ_FIRST(tailq);
		TAILQ_REMOVE(tailq, tmp, buf_link);
		spdk_bdev_io_set_buf(tmp, buf);
	}
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_mempool *pool;
	bdev_io_tailq_t *tailq;
	void *buf = NULL;
	struct spdk_bdev_mgmt_channel *ch;

	assert(cb != NULL);
	assert(bdev_io->u.bdev.iovs != NULL);

	if (spdk_unlikely(bdev_io->u.bdev.iovs[0].iov_base != NULL)) {
		/* Buffer already present */
		cb(bdev_io->ch->channel, bdev_io);
		return;
	}

	assert(len <= SPDK_BDEV_LARGE_BUF_MAX_SIZE);
	ch = spdk_io_channel_get_ctx(bdev_io->ch->mgmt_channel);

	bdev_io->buf_len = len;
	bdev_io->get_buf_cb = cb;
	if (len <= SPDK_BDEV_SMALL_BUF_MAX_SIZE) {
		pool = g_bdev_mgr.buf_small_pool;
		tailq = &ch->need_buf_small;
	} else {
		pool = g_bdev_mgr.buf_large_pool;
		tailq = &ch->need_buf_large;
	}

	buf = spdk_mempool_get(pool);
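
	/*
	 * If the pool is empty, queue this I/O; spdk_bdev_io_put_buf() hands
	 * the next buffer that is returned to the first I/O on this list.
	 */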
	if (!buf) {
		TAILQ_INSERT_TAIL(tailq, bdev_io, buf_link);
	} else {
		spdk_bdev_io_set_buf(bdev_io, buf);
	}
}

static int
spdk_bdev_module_get_max_ctx_size(void)
{
	struct spdk_bdev_module_if *bdev_module;
	int max_bdev_module_size = 0;

	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, tailq) {
		if (bdev_module->get_ctx_size && bdev_module->get_ctx_size() > max_bdev_module_size) {
			max_bdev_module_size = bdev_module->get_ctx_size();
		}
	}

	return max_bdev_module_size;
}

void
spdk_bdev_config_text(FILE *fp)
{
	struct spdk_bdev_module_if *bdev_module;

	TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, tailq) {
		if (bdev_module->config_text) {
			bdev_module->config_text(fp);
		}
	}
}

static int
spdk_bdev_mgmt_channel_create(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_mgmt_channel *ch = ctx_buf;

	TAILQ_INIT(&ch->need_buf_small);
	TAILQ_INIT(&ch->need_buf_large);

	return 0;
}

static void
spdk_bdev_mgmt_channel_destroy(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_mgmt_channel *ch = ctx_buf;

	if (!TAILQ_EMPTY(&ch->need_buf_small) || !TAILQ_EMPTY(&ch->need_buf_large)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on channel destruction\n");
	}
}

static void
spdk_bdev_init_complete(int rc)
{
	spdk_bdev_init_cb cb_fn = g_init_cb_fn;
	void *cb_arg = g_init_cb_arg;

	g_bdev_mgr.init_complete = true;
	g_init_cb_fn = NULL;
	g_init_cb_arg = NULL;

	cb_fn(cb_arg, rc);
}

static void
spdk_bdev_module_action_complete(void)
{
	struct spdk_bdev_module_if *m;

	/*
	 * Don't finish bdev subsystem initialization if
	 * module pre-initialization is still in progress, or
	 * the subsystem has already been initialized.
	 */
	if (!g_bdev_mgr.module_init_complete || g_bdev_mgr.init_complete) {
		return;
	}

	/*
	 * Check all bdev modules for inits/examinations in progress. If any
	 * exist, return immediately since we cannot finish bdev subsystem
	 * initialization until all are completed.
	 */
	TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, tailq) {
		if (m->action_in_progress > 0) {
			return;
		}
	}

	/*
	 * Modules already finished initialization - now that all
	 * the bdev modules have finished their asynchronous I/O
	 * processing, the entire bdev layer can be marked as complete.
	 */
	spdk_bdev_init_complete(0);
}

static void
spdk_bdev_module_action_done(struct spdk_bdev_module_if *module)
{
	assert(module->action_in_progress > 0);
	module->action_in_progress--;
	spdk_bdev_module_action_complete();
}

void
spdk_bdev_module_init_done(struct spdk_bdev_module_if *module)
{
	spdk_bdev_module_action_done(module);
}

void
spdk_bdev_module_examine_done(struct spdk_bdev_module_if *module)
{
	spdk_bdev_module_action_done(module);
}

static int
spdk_bdev_modules_init(void)
{
	struct spdk_bdev_module_if *module;
	int rc = 0;

	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, tailq) {
		rc = module->module_init();
		if (rc != 0) {
			break;
		}
	}

	g_bdev_mgr.module_init_complete = true;
	return rc;
}

void
spdk_bdev_initialize(spdk_bdev_init_cb cb_fn, void *cb_arg)
{
	int cache_size;
	int rc = 0;
	char mempool_name[32];

	assert(cb_fn != NULL);

	g_init_cb_fn = cb_fn;
	g_init_cb_arg = cb_arg;

	snprintf(mempool_name, sizeof(mempool_name), "bdev_io_%d", getpid());
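
	/*
	 * The pool name includes the PID so that multiple SPDK processes on
	 * the same system each create a uniquely named pool.
	 */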
	g_bdev_mgr.bdev_io_pool = spdk_mempool_create(mempool_name,
				  SPDK_BDEV_IO_POOL_SIZE,
				  sizeof(struct spdk_bdev_io) +
				  spdk_bdev_module_get_max_ctx_size(),
				  64,
				  SPDK_ENV_SOCKET_ID_ANY);

	if (g_bdev_mgr.bdev_io_pool == NULL) {
		SPDK_ERRLOG("could not allocate spdk_bdev_io pool\n");
		spdk_bdev_init_complete(-1);
		return;
	}

	/**
	 * Ensure no more than half of the total buffers end up in local caches, by
	 * using spdk_env_get_core_count() to determine how many local caches we need
	 * to account for.
	 */
	cache_size = BUF_SMALL_POOL_SIZE / (2 * spdk_env_get_core_count());
	snprintf(mempool_name, sizeof(mempool_name), "buf_small_pool_%d", getpid());

	g_bdev_mgr.buf_small_pool = spdk_mempool_create(mempool_name,
				    BUF_SMALL_POOL_SIZE,
				    SPDK_BDEV_SMALL_BUF_MAX_SIZE + 512,
				    cache_size,
				    SPDK_ENV_SOCKET_ID_ANY);
	if (!g_bdev_mgr.buf_small_pool) {
		SPDK_ERRLOG("create rbuf small pool failed\n");
		spdk_bdev_init_complete(-1);
		return;
	}

	cache_size = BUF_LARGE_POOL_SIZE / (2 * spdk_env_get_core_count());
	snprintf(mempool_name, sizeof(mempool_name), "buf_large_pool_%d", getpid());

	g_bdev_mgr.buf_large_pool = spdk_mempool_create(mempool_name,
				    BUF_LARGE_POOL_SIZE,
				    SPDK_BDEV_LARGE_BUF_MAX_SIZE + 512,
				    cache_size,
				    SPDK_ENV_SOCKET_ID_ANY);
	if (!g_bdev_mgr.buf_large_pool) {
		SPDK_ERRLOG("create rbuf large pool failed\n");
		spdk_bdev_init_complete(-1);
		return;
	}

	g_bdev_mgr.zero_buffer = spdk_dma_zmalloc(ZERO_BUFFER_SIZE, ZERO_BUFFER_SIZE,
				 NULL);
	if (!g_bdev_mgr.zero_buffer) {
		SPDK_ERRLOG("create bdev zero buffer failed\n");
		spdk_bdev_init_complete(-1);
		return;
	}

#ifdef SPDK_CONFIG_VTUNE
	g_bdev_mgr.domain = __itt_domain_create("spdk_bdev");
#endif

	spdk_io_device_register(&g_bdev_mgr, spdk_bdev_mgmt_channel_create,
				spdk_bdev_mgmt_channel_destroy,
				sizeof(struct spdk_bdev_mgmt_channel));

	rc = spdk_bdev_modules_init();
	if (rc != 0) {
		SPDK_ERRLOG("bdev modules init failed\n");
		spdk_bdev_init_complete(-1);
		return;
	}

	spdk_bdev_module_action_complete();
}

static void
spdk_bdev_module_finish_cb(void *io_device)
{
	spdk_bdev_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}

static void
spdk_bdev_module_finish_complete(void)
{
	if (spdk_mempool_count(g_bdev_mgr.bdev_io_pool) != SPDK_BDEV_IO_POOL_SIZE) {
		SPDK_ERRLOG("bdev IO pool count is %zu but should be %u\n",
			    spdk_mempool_count(g_bdev_mgr.bdev_io_pool),
			    SPDK_BDEV_IO_POOL_SIZE);
	}

	if (spdk_mempool_count(g_bdev_mgr.buf_small_pool) != BUF_SMALL_POOL_SIZE) {
		SPDK_ERRLOG("Small buffer pool count is %zu but should be %u\n",
			    spdk_mempool_count(g_bdev_mgr.buf_small_pool),
			    BUF_SMALL_POOL_SIZE);
		assert(false);
	}

	if (spdk_mempool_count(g_bdev_mgr.buf_large_pool) != BUF_LARGE_POOL_SIZE) {
		SPDK_ERRLOG("Large buffer pool count is %zu but should be %u\n",
			    spdk_mempool_count(g_bdev_mgr.buf_large_pool),
			    BUF_LARGE_POOL_SIZE);
		assert(false);
	}

	spdk_mempool_free(g_bdev_mgr.bdev_io_pool);
	spdk_mempool_free(g_bdev_mgr.buf_small_pool);
	spdk_mempool_free(g_bdev_mgr.buf_large_pool);
	spdk_dma_free(g_bdev_mgr.zero_buffer);

	spdk_io_device_unregister(&g_bdev_mgr, spdk_bdev_module_finish_cb);
}

static void
spdk_bdev_module_finish_iter(void *arg)
{
	/* Notice that this variable is static. It is saved between calls to
	 * this function. */
	static struct spdk_bdev_module_if *resume_bdev_module = NULL;
	struct spdk_bdev_module_if *bdev_module;

	/* Start iterating from the last touched module */
	if (!resume_bdev_module) {
		bdev_module = TAILQ_FIRST(&g_bdev_mgr.bdev_modules);
	} else {
		bdev_module = TAILQ_NEXT(resume_bdev_module, tailq);
	}

	while (bdev_module) {
		if (bdev_module->async_fini) {
			/* Save our place so we can resume later. We must
			 * save the variable here, before calling module_fini()
			 * below, because in some cases the module may immediately
			 * call spdk_bdev_module_finish_done() and re-enter
			 * this function to continue iterating. */
			resume_bdev_module = bdev_module;
		}

		if (bdev_module->module_fini) {
			bdev_module->module_fini();
		}

		if (bdev_module->async_fini) {
			return;
		}

		bdev_module = TAILQ_NEXT(bdev_module, tailq);
	}

	resume_bdev_module = NULL;
	spdk_bdev_module_finish_complete();
}

void
spdk_bdev_module_finish_done(void)
{
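	/*
	 * Module finish continues on the thread that started spdk_bdev_finish();
	 * if a module completed its async fini on a different thread, hop back
	 * to g_fini_thread before resuming the iteration.
	 */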
	if (spdk_get_thread() != g_fini_thread) {
		spdk_thread_send_msg(g_fini_thread, spdk_bdev_module_finish_iter, NULL);
	} else {
		spdk_bdev_module_finish_iter(NULL);
	}
}

void
spdk_bdev_finish(spdk_bdev_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_thread = spdk_get_thread();

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_bdev_module_finish_iter(NULL);
}

struct spdk_bdev_io *
spdk_bdev_get_io(void)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
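	/*
	 * The spdk_bdev_io pool is sized (SPDK_BDEV_IO_POOL_SIZE) so that it is
	 * not expected to run dry; exhaustion is treated as fatal here.
	 */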
	if (!bdev_io) {
		SPDK_ERRLOG("Unable to get spdk_bdev_io\n");
		abort();
	}

	memset(bdev_io, 0, offsetof(struct spdk_bdev_io, u));

	return bdev_io;
}

static void
spdk_bdev_put_io(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->buf != NULL) {
		spdk_bdev_io_put_buf(bdev_io);
	}

	spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
}

static void
spdk_bdev_io_submit(struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_bdev_channel *bdev_ch = bdev_io->ch;
	struct spdk_io_channel *ch = bdev_ch->channel;

	assert(bdev_io->status == SPDK_BDEV_IO_STATUS_PENDING);

	bdev_ch->io_outstanding++;
	bdev_io->in_submit_request = true;
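	/*
	 * Submit directly only when the channel has no flags set and no I/O
	 * are already queued on nomem_io; queuing behind earlier NOMEM retries
	 * preserves ordering. If a reset is in progress, the I/O is failed.
	 */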
	if (spdk_likely(bdev_ch->flags == 0)) {
		if (spdk_likely(TAILQ_EMPTY(&bdev_ch->nomem_io))) {
			bdev->fn_table->submit_request(ch, bdev_io);
		} else {
			bdev_ch->io_outstanding--;
			TAILQ_INSERT_TAIL(&bdev_ch->nomem_io, bdev_io, link);
		}
	} else if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	} else {
		SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
	bdev_io->in_submit_request = false;
}

static void
spdk_bdev_io_submit_reset(struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_bdev_channel *bdev_ch = bdev_io->ch;
	struct spdk_io_channel *ch = bdev_ch->channel;

	assert(bdev_io->status == SPDK_BDEV_IO_STATUS_PENDING);

	bdev_io->in_submit_request = true;
	bdev->fn_table->submit_request(ch, bdev_io);
	bdev_io->in_submit_request = false;
}

static void
spdk_bdev_io_init(struct spdk_bdev_io *bdev_io,
		  struct spdk_bdev *bdev, void *cb_arg,
		  spdk_bdev_io_completion_cb cb)
{
	bdev_io->bdev = bdev;
	bdev_io->caller_ctx = cb_arg;
	bdev_io->cb = cb;
	bdev_io->status = SPDK_BDEV_IO_STATUS_PENDING;
	bdev_io->in_submit_request = false;
}

bool
spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
{
	return bdev->fn_table->io_type_supported(bdev->ctxt, io_type);
}

int
spdk_bdev_dump_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	if (bdev->fn_table->dump_config_json) {
		return bdev->fn_table->dump_config_json(bdev->ctxt, w);
	}

	return 0;
}

static int
spdk_bdev_channel_create(void *io_device, void *ctx_buf)
{
	struct spdk_bdev		*bdev = io_device;
	struct spdk_bdev_channel	*ch = ctx_buf;

	ch->bdev = io_device;
	ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
	if (!ch->channel) {
		return -1;
	}

	ch->mgmt_channel = spdk_get_io_channel(&g_bdev_mgr);
	if (!ch->mgmt_channel) {
		spdk_put_io_channel(ch->channel);
		return -1;
	}

	memset(&ch->stat, 0, sizeof(ch->stat));
	ch->io_outstanding = 0;
	TAILQ_INIT(&ch->queued_resets);
	TAILQ_INIT(&ch->nomem_io);
	ch->nomem_threshold = 0;
	ch->flags = 0;

#ifdef SPDK_CONFIG_VTUNE
	{
		char *name;
		__itt_init_ittlib(NULL, 0);
		name = spdk_sprintf_alloc("spdk_bdev_%s_%p", ch->bdev->name, ch);
		if (!name) {
			spdk_put_io_channel(ch->channel);
			spdk_put_io_channel(ch->mgmt_channel);
			return -1;
		}
		ch->handle = __itt_string_handle_create(name);
		free(name);
		ch->start_tsc = spdk_get_ticks();
		ch->interval_tsc = spdk_get_ticks_hz() / 100;
	}
#endif

	return 0;
}

/*
 * Abort I/O that are waiting on a data buffer. These types of I/O are
 * linked using the spdk_bdev_io buf_link TAILQ_ENTRY.
 */
static void
_spdk_bdev_abort_buf_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io, *tmp;

	TAILQ_FOREACH_SAFE(bdev_io, queue, buf_link, tmp) {
		if (bdev_io->ch == ch) {
			TAILQ_REMOVE(queue, bdev_io, buf_link);
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/*
 * Abort I/O that are queued waiting for submission. These types of I/O are
 * linked using the spdk_bdev_io link TAILQ_ENTRY.
 */
static void
_spdk_bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io, *tmp;

	TAILQ_FOREACH_SAFE(bdev_io, queue, link, tmp) {
		if (bdev_io->ch == ch) {
			TAILQ_REMOVE(queue, bdev_io, link);
			/*
			 * spdk_bdev_io_complete() assumes that the completed I/O had
			 * been submitted to the bdev module. Since in this case it
			 * hadn't, bump io_outstanding to account for the decrement
			 * that spdk_bdev_io_complete() will do.
			 */
			if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) {
				ch->io_outstanding++;
			}
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

static void
spdk_bdev_channel_destroy(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_channel	*ch = ctx_buf;
	struct spdk_bdev_mgmt_channel	*mgmt_channel;

	mgmt_channel = spdk_io_channel_get_ctx(ch->mgmt_channel);
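
	/*
	 * Fail any I/O from this channel that is still held by the generic bdev
	 * layer: queued resets, NOMEM retries, and I/O waiting for a data buffer
	 * on the management channel.
	 */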
	_spdk_bdev_abort_queued_io(&ch->queued_resets, ch);
	_spdk_bdev_abort_queued_io(&ch->nomem_io, ch);
	_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, ch);
	_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, ch);

	spdk_put_io_channel(ch->channel);
	spdk_put_io_channel(ch->mgmt_channel);
	assert(ch->io_outstanding == 0);
}

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	return spdk_get_io_channel(desc->bdev);
}

const char *
spdk_bdev_get_name(const struct spdk_bdev *bdev)
{
	return bdev->name;
}

const char *
spdk_bdev_get_product_name(const struct spdk_bdev *bdev)
{
	return bdev->product_name;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}

size_t
spdk_bdev_get_buf_align(const struct spdk_bdev *bdev)
{
	/* TODO: push this logic down to the bdev modules */
	if (bdev->need_aligned_buffer) {
		return bdev->blocklen;
	}

	return 1;
}

uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

bool
spdk_bdev_has_write_cache(const struct spdk_bdev *bdev)
{
	return bdev->write_cache;
}

/*
 * Convert I/O offset and length from bytes to blocks.
 *
 * Returns zero on success or non-zero if the byte parameters aren't divisible by the block size.
 */
static uint64_t
spdk_bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t offset_bytes, uint64_t *offset_blocks,
			  uint64_t num_bytes, uint64_t *num_blocks)
{
	uint32_t block_size = bdev->blocklen;

	*offset_blocks = offset_bytes / block_size;
	*num_blocks = num_bytes / block_size;

	return (offset_bytes % block_size) | (num_bytes % block_size);
}

static bool
spdk_bdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t offset_blocks, uint64_t num_blocks)
{
	/* Return failure if offset_blocks + num_blocks is less than offset_blocks; indicates there
	 * has been an overflow and hence the offset has been wrapped around */
	if (offset_blocks + num_blocks < offset_blocks) {
		return false;
	}

	/* Return failure if offset_blocks + num_blocks exceeds the size of the bdev */
	if (offset_blocks + num_blocks > bdev->blockcnt) {
		return false;
	}

	return true;
}

int
spdk_bdev_read(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	       void *buf, uint64_t offset, uint64_t nbytes,
	       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	uint64_t offset_blocks, num_blocks;

	if (spdk_bdev_bytes_to_blocks(desc->bdev, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
		return -EINVAL;
	}

	return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		      void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		      spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);

	if (!spdk_bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
		return -EINVAL;
	}

	bdev_io = spdk_bdev_get_io();
	if (!bdev_io) {
		SPDK_ERRLOG("spdk_bdev_io memory allocation failed during read\n");
		return -ENOMEM;
	}

	bdev_io->ch = channel;
	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
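	/*
	 * Wrap the single caller-provided buffer in the embedded iovec so the
	 * bdev module always sees an iovec array, regardless of entry point.
	 */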
	bdev_io->u.bdev.iov.iov_base = buf;
	bdev_io->u.bdev.iov.iov_len = num_blocks * bdev->blocklen;
	bdev_io->u.bdev.iovs = &bdev_io->u.bdev.iov;
	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->u.bdev.num_blocks = num_blocks;
	bdev_io->u.bdev.offset_blocks = offset_blocks;
	spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);

	spdk_bdev_io_submit(bdev_io);
	return 0;
}

int
spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt,
		uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	uint64_t offset_blocks, num_blocks;

	if (spdk_bdev_bytes_to_blocks(desc->bdev, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
		return -EINVAL;
	}

	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);

	if (!spdk_bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
		return -EINVAL;
	}

	bdev_io = spdk_bdev_get_io();
	if (!bdev_io) {
		SPDK_ERRLOG("spdk_bdev_io memory allocation failed during read\n");
		return -ENOMEM;
	}

	bdev_io->ch = channel;
	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io->u.bdev.iovs = iov;
	bdev_io->u.bdev.iovcnt = iovcnt;
	bdev_io->u.bdev.num_blocks = num_blocks;
	bdev_io->u.bdev.offset_blocks = offset_blocks;
	spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);

	spdk_bdev_io_submit(bdev_io);
	return 0;
}

int
spdk_bdev_write(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	uint64_t offset_blocks, num_blocks;

	if (spdk_bdev_bytes_to_blocks(desc->bdev, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
		return -EINVAL;
	}

	return spdk_bdev_write_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_write_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);

	if (!desc->write) {
		return -EBADF;
	}

	if (!spdk_bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
		return -EINVAL;
	}

	bdev_io = spdk_bdev_get_io();
	if (!bdev_io) {
		SPDK_ERRLOG("bdev_io memory allocation failed during write\n");
		return -ENOMEM;
	}

	bdev_io->ch = channel;
	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	bdev_io->u.bdev.iov.iov_base = buf;
	bdev_io->u.bdev.iov.iov_len = num_blocks * bdev->blocklen;
	bdev_io->u.bdev.iovs = &bdev_io->u.bdev.iov;
	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->u.bdev.num_blocks = num_blocks;
	bdev_io->u.bdev.offset_blocks = offset_blocks;
	spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);

	spdk_bdev_io_submit(bdev_io);
	return 0;
}
|
|
|
|
|
2017-06-05 18:39:38 +00:00
|
|
|
int
|
2017-07-06 19:39:19 +00:00
|
|
|
spdk_bdev_writev(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
2016-07-20 18:16:23 +00:00
|
|
|
struct iovec *iov, int iovcnt,
|
2016-09-14 17:34:48 +00:00
|
|
|
uint64_t offset, uint64_t len,
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2017-08-28 21:55:35 +00:00
|
|
|
{
|
|
|
|
uint64_t offset_blocks, num_blocks;
|
|
|
|
|
|
|
|
if (spdk_bdev_bytes_to_blocks(desc->bdev, offset, &offset_blocks, len, &num_blocks) != 0) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
|
|
|
struct iovec *iov, int iovcnt,
|
|
|
|
uint64_t offset_blocks, uint64_t num_blocks,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2017-07-06 19:39:19 +00:00
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
struct spdk_bdev_io *bdev_io;
|
2017-04-04 21:01:54 +00:00
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-07-06 19:39:19 +00:00
|
|
|
if (!desc->write) {
|
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
2017-08-28 21:55:35 +00:00
|
|
|
if (!spdk_bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
|
2017-06-05 18:39:38 +00:00
|
|
|
return -EINVAL;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bdev_io = spdk_bdev_get_io();
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed duing writev\n");
|
2017-06-05 18:39:38 +00:00
|
|
|
return -ENOMEM;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-05-04 20:18:03 +00:00
|
|
|
bdev_io->ch = channel;
|
2016-07-20 18:16:23 +00:00
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
|
2017-09-20 13:10:17 +00:00
|
|
|
bdev_io->u.bdev.iovs = iov;
|
|
|
|
bdev_io->u.bdev.iovcnt = iovcnt;
|
|
|
|
bdev_io->u.bdev.num_blocks = num_blocks;
|
|
|
|
bdev_io->u.bdev.offset_blocks = offset_blocks;
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
|
2017-09-07 21:51:14 +00:00
|
|
|
spdk_bdev_io_submit(bdev_io);
|
2017-06-05 18:39:38 +00:00
|
|
|
return 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-08-01 18:28:29 +00:00
|
|
|
int
|
|
|
|
spdk_bdev_write_zeroes(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
|
|
|
uint64_t offset, uint64_t len,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2017-08-28 21:55:35 +00:00
|
|
|
{
|
|
|
|
uint64_t offset_blocks, num_blocks;
|
|
|
|
|
|
|
|
if (spdk_bdev_bytes_to_blocks(desc->bdev, offset, &offset_blocks, len, &num_blocks) != 0) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return spdk_bdev_write_zeroes_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
|
|
|
uint64_t offset_blocks, uint64_t num_blocks,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2017-08-01 18:28:29 +00:00
|
|
|
{
|
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
|
|
|
struct spdk_bdev_io *bdev_io;
|
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
2017-07-28 22:34:24 +00:00
|
|
|
uint64_t len;
|
|
|
|
bool split_request = false;
|
|
|
|
|
|
|
|
if (num_blocks > UINT64_MAX / spdk_bdev_get_block_size(bdev)) {
|
|
|
|
SPDK_ERRLOG("length argument out of range in write_zeroes\n");
|
|
|
|
return -ERANGE;
|
|
|
|
}
|
2017-08-01 18:28:29 +00:00
|
|
|
|
2017-08-28 21:55:35 +00:00
|
|
|
if (!spdk_bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
|
2017-08-01 18:28:29 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
bdev_io = spdk_bdev_get_io();
|
2017-07-28 22:34:24 +00:00
|
|
|
|
2017-08-01 18:28:29 +00:00
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed duing write_zeroes\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
bdev_io->ch = channel;
|
2017-09-20 13:10:17 +00:00
|
|
|
bdev_io->u.bdev.offset_blocks = offset_blocks;
|
2017-08-01 18:28:29 +00:00
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
|
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
|
|
|
|
bdev_io->u.bdev.num_blocks = num_blocks;
|
|
|
|
bdev_io->u.bdev.iovs = NULL;
|
|
|
|
bdev_io->u.bdev.iovcnt = 0;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
assert(spdk_bdev_get_block_size(bdev) <= ZERO_BUFFER_SIZE);
|
|
|
|
|
|
|
|
len = spdk_bdev_get_block_size(bdev) * num_blocks;
|
|
|
|
|
|
|
|
if (len > ZERO_BUFFER_SIZE) {
|
|
|
|
split_request = true;
|
|
|
|
len = ZERO_BUFFER_SIZE;
|
|
|
|
}
|
2017-08-01 18:28:29 +00:00
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
|
|
|
|
bdev_io->u.bdev.iov.iov_base = g_bdev_mgr.zero_buffer;
|
|
|
|
bdev_io->u.bdev.iov.iov_len = len;
|
|
|
|
bdev_io->u.bdev.iovs = &bdev_io->u.bdev.iov;
|
|
|
|
bdev_io->u.bdev.iovcnt = 1;
|
|
|
|
bdev_io->u.bdev.num_blocks = len / spdk_bdev_get_block_size(bdev);
|
|
|
|
bdev_io->split_remaining_num_blocks = num_blocks - bdev_io->u.bdev.num_blocks;
|
|
|
|
bdev_io->split_current_offset_blocks = offset_blocks + bdev_io->u.bdev.num_blocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (split_request) {
|
|
|
|
bdev_io->stored_user_cb = cb;
|
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, spdk_bdev_write_zeroes_split);
|
|
|
|
} else {
|
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
}
|
2017-09-07 21:51:14 +00:00
|
|
|
spdk_bdev_io_submit(bdev_io);
|
2017-08-01 18:28:29 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-06-05 18:39:38 +00:00
|
|
|
int
|
2017-07-06 19:39:19 +00:00
|
|
|
spdk_bdev_unmap(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
2017-07-19 21:32:04 +00:00
|
|
|
uint64_t offset, uint64_t nbytes,
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2017-08-28 21:55:35 +00:00
|
|
|
{
|
|
|
|
uint64_t offset_blocks, num_blocks;
|
|
|
|
|
|
|
|
if (spdk_bdev_bytes_to_blocks(desc->bdev, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return spdk_bdev_unmap_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
|
|
|
uint64_t offset_blocks, uint64_t num_blocks,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2017-07-06 19:39:19 +00:00
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
struct spdk_bdev_io *bdev_io;
|
2017-04-04 21:01:54 +00:00
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-07-06 19:39:19 +00:00
|
|
|
if (!desc->write) {
|
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
2017-08-28 21:55:35 +00:00
|
|
|
if (!spdk_bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
|
2017-06-05 18:39:38 +00:00
|
|
|
return -EINVAL;
|
2016-10-05 20:57:49 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 21:55:35 +00:00
|
|
|
if (num_blocks == 0) {
|
2017-07-19 21:32:04 +00:00
|
|
|
SPDK_ERRLOG("Can't unmap 0 bytes\n");
|
2017-06-05 18:39:38 +00:00
|
|
|
return -EINVAL;
|
2016-10-05 20:57:49 +00:00
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
bdev_io = spdk_bdev_get_io();
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed duing unmap\n");
|
2017-06-05 18:39:38 +00:00
|
|
|
return -ENOMEM;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-05-04 20:18:03 +00:00
|
|
|
bdev_io->ch = channel;
|
2016-07-20 18:16:23 +00:00
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
|
2017-10-18 13:16:02 +00:00
|
|
|
bdev_io->u.bdev.iov.iov_base = NULL;
|
|
|
|
bdev_io->u.bdev.iov.iov_len = 0;
|
2017-09-08 13:07:10 +00:00
|
|
|
bdev_io->u.bdev.iovs = &bdev_io->u.bdev.iov;
|
|
|
|
bdev_io->u.bdev.iovcnt = 1;
|
2017-09-20 13:10:17 +00:00
|
|
|
bdev_io->u.bdev.offset_blocks = offset_blocks;
|
|
|
|
bdev_io->u.bdev.num_blocks = num_blocks;
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
|
2017-09-07 21:51:14 +00:00
|
|
|
spdk_bdev_io_submit(bdev_io);
|
2017-06-05 18:39:38 +00:00
|
|
|
return 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-06-05 18:39:38 +00:00
|
|
|
int
|
2017-07-06 19:39:19 +00:00
|
|
|
spdk_bdev_flush(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
2016-07-20 18:16:23 +00:00
|
|
|
uint64_t offset, uint64_t length,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2017-08-28 21:55:35 +00:00
|
|
|
{
|
|
|
|
uint64_t offset_blocks, num_blocks;
|
|
|
|
|
|
|
|
if (spdk_bdev_bytes_to_blocks(desc->bdev, offset, &offset_blocks, length, &num_blocks) != 0) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return spdk_bdev_flush_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
|
|
|
uint64_t offset_blocks, uint64_t num_blocks,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2017-07-06 19:39:19 +00:00
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
struct spdk_bdev_io *bdev_io;
|
2017-04-04 21:01:54 +00:00
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-07-06 19:39:19 +00:00
|
|
|
if (!desc->write) {
|
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
2017-08-28 21:55:35 +00:00
|
|
|
if (!spdk_bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
bdev_io = spdk_bdev_get_io();
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed duing flush\n");
|
2017-06-05 18:39:38 +00:00
|
|
|
return -ENOMEM;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-05-04 20:18:03 +00:00
|
|
|
bdev_io->ch = channel;
|
2016-07-20 18:16:23 +00:00
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
|
2017-09-08 13:07:10 +00:00
|
|
|
bdev_io->u.bdev.iovs = NULL;
|
|
|
|
bdev_io->u.bdev.iovcnt = 0;
|
2017-09-20 13:10:17 +00:00
|
|
|
bdev_io->u.bdev.offset_blocks = offset_blocks;
|
|
|
|
bdev_io->u.bdev.num_blocks = num_blocks;
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
|
2017-09-07 21:51:14 +00:00
|
|
|
spdk_bdev_io_submit(bdev_io);
|
2017-06-05 18:39:38 +00:00
|
|
|
return 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-05-25 20:11:33 +00:00
|
|
|
static void
|
2017-12-11 22:14:19 +00:00
|
|
|
_spdk_bdev_reset_dev(struct spdk_io_channel_iter *i, int status)
|
2017-05-25 20:11:33 +00:00
|
|
|
{
|
2017-12-11 22:14:19 +00:00
|
|
|
struct spdk_bdev_channel *ch = spdk_io_channel_iter_get_ctx(i);
|
2017-09-12 16:34:55 +00:00
|
|
|
struct spdk_bdev_io *bdev_io;
|
2017-05-25 20:11:33 +00:00
|
|
|
|
2017-09-12 16:34:55 +00:00
|
|
|
bdev_io = TAILQ_FIRST(&ch->queued_resets);
|
|
|
|
TAILQ_REMOVE(&ch->queued_resets, bdev_io, link);
|
2017-09-14 21:02:09 +00:00
|
|
|
spdk_bdev_io_submit_reset(bdev_io);
|
2017-05-25 20:11:33 +00:00
|
|
|
}
|
|
|
|
|
2017-12-11 22:14:19 +00:00
|
|
|
static void
|
|
|
|
_spdk_bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
|
2017-05-25 20:11:33 +00:00
|
|
|
{
|
2017-12-11 22:14:19 +00:00
|
|
|
struct spdk_io_channel *ch;
|
2017-05-25 20:11:33 +00:00
|
|
|
struct spdk_bdev_channel *channel;
|
2017-05-10 21:42:45 +00:00
|
|
|
struct spdk_bdev_mgmt_channel *mgmt_channel;
|
2017-05-25 20:11:33 +00:00
|
|
|
|
2017-12-11 22:14:19 +00:00
|
|
|
ch = spdk_io_channel_iter_get_channel(i);
|
2017-05-25 20:11:33 +00:00
|
|
|
channel = spdk_io_channel_get_ctx(ch);
|
2017-05-10 21:42:45 +00:00
|
|
|
mgmt_channel = spdk_io_channel_get_ctx(channel->mgmt_channel);
|
2017-05-25 20:11:33 +00:00
|
|
|
|
2017-09-08 18:44:50 +00:00
|
|
|
channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
|
|
|
|
|
bdev: add ENOMEM handling
At very high queue depths, bdev modules may not have enough
internal resources to track all of the incoming I/O. For example,
we allocate a finite number of nvme_request objects per allocated
queue pair. Currently if these resources are exhausted, the
bdev module will return failure (with no indication why) which
gets propagated all the way back to the application.
So instead, add SPDK_BDEV_IO_STATUS_NOMEM to allow bdev modules
to indicate this type of failure. Also add handling for this
status type in the generic bdev layer, involving queuing these
I/O for later retry after other I/O on the failing channel have
completed.
This does place an expectation on the bdev module that these
internal resources are allocated per io_channel. Otherwise we
cannot guarantee forward progress solely on reception of
completions. For example, without this guarantee, a bdev
module could theoretically return ENOMEM even if there were
no I/O oustanding for that io_channel. nvme, aio, rbd,
virtio and null drivers comply with this expectation already.
malloc only complies though when not using copy offload.
This patch will fix malloc w/ copy engine to at least
return ENOMEM when no copy descriptors are available. If the
condition above occurs, I/O waiting for resources will get
failed as part of a subsequent reset which matches the
behavior it has today.
Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Iea7cd51a611af8abe882794d0b2361fdbb74e84e
Reviewed-on: https://review.gerrithub.io/378853
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
2017-09-15 20:47:17 +00:00
|
|
|
_spdk_bdev_abort_queued_io(&channel->nomem_io, channel);
|
2017-09-12 19:14:48 +00:00
|
|
|
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, channel);
|
|
|
|
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, channel);
|
2017-11-16 07:42:37 +00:00
|
|
|
|
2017-12-11 22:14:19 +00:00
|
|
|
spdk_for_each_channel_continue(i, 0);
|
2017-05-25 20:11:33 +00:00
|
|
|
}
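/*
 * Illustrative sketch (not part of the original source): per the ENOMEM commit
 * message above, a bdev module signals per-channel resource exhaustion from its
 * submit_request callback by completing the I/O with SPDK_BDEV_IO_STATUS_NOMEM
 * rather than failing it outright; the generic layer then parks the I/O on the
 * channel's nomem_io queue and retries it in _spdk_bdev_ch_retry_io(). The
 * example_* names below are hypothetical; only spdk_io_channel_get_ctx(),
 * spdk_bdev_io_complete() and the TAILQ macros are real APIs.
 *
 *	static void
 *	example_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 *	{
 *		struct example_channel *ech = spdk_io_channel_get_ctx(ch);
 *		struct example_request *req = TAILQ_FIRST(&ech->free_requests);
 *
 *		if (req == NULL) {
 *			// Per-channel request pool exhausted - ask the bdev layer to retry later.
 *			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
 *			return;
 *		}
 *
 *		TAILQ_REMOVE(&ech->free_requests, req, link);
 *		example_start_request(ech, req, bdev_io);
 *	}
 */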
|
|
|
|
|
2017-06-15 14:17:12 +00:00
|
|
|
static void
|
|
|
|
_spdk_bdev_start_reset(void *ctx)
|
|
|
|
{
|
2017-09-12 16:34:55 +00:00
|
|
|
struct spdk_bdev_channel *ch = ctx;
|
2017-06-15 14:17:12 +00:00
|
|
|
|
2017-12-06 21:52:20 +00:00
|
|
|
spdk_for_each_channel(ch->bdev, _spdk_bdev_reset_freeze_channel,
|
2017-09-12 16:34:55 +00:00
|
|
|
ch, _spdk_bdev_reset_dev);
|
2017-06-15 14:17:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-09-08 19:34:49 +00:00
|
|
|
_spdk_bdev_channel_start_reset(struct spdk_bdev_channel *ch)
|
2017-06-15 14:17:12 +00:00
|
|
|
{
|
2017-09-08 19:34:49 +00:00
|
|
|
struct spdk_bdev *bdev = ch->bdev;
|
2017-06-15 14:17:12 +00:00
|
|
|
|
2017-09-08 19:34:49 +00:00
|
|
|
assert(!TAILQ_EMPTY(&ch->queued_resets));
|
2017-06-15 14:17:12 +00:00
|
|
|
|
2017-09-08 19:34:49 +00:00
|
|
|
pthread_mutex_lock(&bdev->mutex);
|
2017-09-12 19:14:48 +00:00
|
|
|
if (bdev->reset_in_progress == NULL) {
|
|
|
|
bdev->reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
|
2017-09-12 16:34:55 +00:00
|
|
|
/*
|
|
|
|
* Take a channel reference for the target bdev for the life of this
|
|
|
|
* reset. This guards against the channel getting destroyed while
|
|
|
|
* spdk_for_each_channel() calls related to this reset IO are in
|
|
|
|
* progress. We will release the reference when this reset is
|
|
|
|
* completed.
|
|
|
|
*/
|
2017-09-12 19:14:48 +00:00
|
|
|
bdev->reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(bdev);
|
2017-09-12 16:34:55 +00:00
|
|
|
_spdk_bdev_start_reset(ch);
|
2017-06-15 14:17:12 +00:00
|
|
|
}
|
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
int
|
2017-07-06 19:39:19 +00:00
|
|
|
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
|
|
|
{
|
2017-07-06 19:39:19 +00:00
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
struct spdk_bdev_io *bdev_io;
|
2017-05-23 17:51:50 +00:00
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
|
|
|
bdev_io = spdk_bdev_get_io();
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed duing reset\n");
|
2017-09-26 21:45:22 +00:00
|
|
|
return -ENOMEM;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-05-23 17:51:50 +00:00
|
|
|
bdev_io->ch = channel;
|
2016-07-20 18:16:23 +00:00
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
|
2017-09-12 16:34:55 +00:00
|
|
|
bdev_io->u.reset.ch_ref = NULL;
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
|
2017-06-15 14:17:12 +00:00
|
|
|
pthread_mutex_lock(&bdev->mutex);
|
2017-09-08 19:34:49 +00:00
|
|
|
TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, link);
|
2017-06-15 14:17:12 +00:00
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
|
|
|
|
2017-09-08 19:34:49 +00:00
|
|
|
_spdk_bdev_channel_start_reset(channel);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-05-25 20:11:33 +00:00
|
|
|
return 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-04-06 21:40:29 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_get_io_stat(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
|
|
|
|
struct spdk_bdev_io_stat *stat)
|
|
|
|
{
|
2017-05-30 08:45:46 +00:00
|
|
|
#ifdef SPDK_CONFIG_VTUNE
|
|
|
|
SPDK_ERRLOG("Calling spdk_bdev_get_io_stat is not allowed when VTune integration is enabled.\n");
|
|
|
|
memset(stat, 0, sizeof(*stat));
|
|
|
|
return;
|
|
|
|
#endif
|
2017-04-06 21:40:29 +00:00
|
|
|
|
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
|
|
|
|
|
|
|
*stat = channel->stat;
|
|
|
|
memset(&channel->stat, 0, sizeof(channel->stat));
|
|
|
|
}
|
|
|
|
|
2017-06-05 18:39:38 +00:00
|
|
|
int
|
2017-07-06 19:39:19 +00:00
|
|
|
spdk_bdev_nvme_admin_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
2017-05-13 20:12:13 +00:00
|
|
|
const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
|
|
|
{
|
2017-07-06 19:39:19 +00:00
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
2017-05-13 20:12:13 +00:00
|
|
|
struct spdk_bdev_io *bdev_io;
|
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
|
|
|
|
2017-07-06 19:39:19 +00:00
|
|
|
if (!desc->write) {
|
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
2017-05-13 20:12:13 +00:00
|
|
|
bdev_io = spdk_bdev_get_io();
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed during nvme_admin_passthru\n");
|
2017-06-05 18:39:38 +00:00
|
|
|
return -ENOMEM;
|
2017-05-13 20:12:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bdev_io->ch = channel;
|
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
|
|
|
|
bdev_io->u.nvme_passthru.cmd = *cmd;
|
|
|
|
bdev_io->u.nvme_passthru.buf = buf;
|
|
|
|
bdev_io->u.nvme_passthru.nbytes = nbytes;
|
2017-11-14 06:33:11 +00:00
|
|
|
bdev_io->u.nvme_passthru.md_buf = NULL;
|
|
|
|
bdev_io->u.nvme_passthru.md_len = 0;
|
2017-05-13 20:12:13 +00:00
|
|
|
|
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
|
2017-09-07 21:51:14 +00:00
|
|
|
spdk_bdev_io_submit(bdev_io);
|
2017-06-05 18:39:38 +00:00
|
|
|
return 0;
|
2017-05-13 20:12:13 +00:00
|
|
|
}
|
|
|
|
|
2017-06-05 18:39:38 +00:00
|
|
|
int
|
2017-07-06 19:39:19 +00:00
|
|
|
spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
2017-06-05 18:02:09 +00:00
|
|
|
const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
|
|
|
{
|
2017-07-06 19:39:19 +00:00
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
2017-06-05 18:02:09 +00:00
|
|
|
struct spdk_bdev_io *bdev_io;
|
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
|
|
|
|
2017-07-06 19:39:19 +00:00
|
|
|
if (!desc->write) {
|
|
|
|
/*
|
|
|
|
* Do not try to parse the NVMe command - we could maybe use bits in the opcode
|
|
|
|
* to easily determine if the command is a read or write, but for now just
|
|
|
|
* do not allow io_passthru with a read-only descriptor.
|
|
|
|
*/
|
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
2017-06-05 18:02:09 +00:00
|
|
|
bdev_io = spdk_bdev_get_io();
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed during nvme_admin_passthru\n");
|
2017-06-05 18:39:38 +00:00
|
|
|
return -ENOMEM;
|
2017-06-05 18:02:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bdev_io->ch = channel;
|
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO;
|
|
|
|
bdev_io->u.nvme_passthru.cmd = *cmd;
|
|
|
|
bdev_io->u.nvme_passthru.buf = buf;
|
|
|
|
bdev_io->u.nvme_passthru.nbytes = nbytes;
|
2017-11-14 06:33:11 +00:00
|
|
|
bdev_io->u.nvme_passthru.md_buf = NULL;
|
|
|
|
bdev_io->u.nvme_passthru.md_len = 0;
|
|
|
|
|
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
|
|
|
|
spdk_bdev_io_submit(bdev_io);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_bdev_nvme_io_passthru_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
|
|
|
|
const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len,
|
|
|
|
spdk_bdev_io_completion_cb cb, void *cb_arg)
|
|
|
|
{
|
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
|
|
|
struct spdk_bdev_io *bdev_io;
|
|
|
|
struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
|
|
|
|
|
|
|
|
if (!desc->write) {
|
|
|
|
/*
|
|
|
|
* Do not try to parse the NVMe command - we could maybe use bits in the opcode
|
|
|
|
* to easily determine if the command is a read or write, but for now just
|
|
|
|
* do not allow io_passthru with a read-only descriptor.
|
|
|
|
*/
|
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
|
|
|
bdev_io = spdk_bdev_get_io();
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io memory allocation failed during nvme_admin_passthru\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
bdev_io->ch = channel;
|
|
|
|
bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO_MD;
|
|
|
|
bdev_io->u.nvme_passthru.cmd = *cmd;
|
|
|
|
bdev_io->u.nvme_passthru.buf = buf;
|
|
|
|
bdev_io->u.nvme_passthru.nbytes = nbytes;
|
|
|
|
bdev_io->u.nvme_passthru.md_buf = md_buf;
|
|
|
|
bdev_io->u.nvme_passthru.md_len = md_len;
|
2017-06-05 18:02:09 +00:00
|
|
|
|
|
|
|
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
|
|
|
|
|
2017-09-07 21:51:14 +00:00
|
|
|
spdk_bdev_io_submit(bdev_io);
|
2017-06-05 18:39:38 +00:00
|
|
|
return 0;
|
2017-06-05 18:02:09 +00:00
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
int
|
|
|
|
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
|
|
|
|
{
|
|
|
|
if (!bdev_io) {
|
|
|
|
SPDK_ERRLOG("bdev_io is NULL\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bdev_io->status == SPDK_BDEV_IO_STATUS_PENDING) {
|
|
|
|
SPDK_ERRLOG("bdev_io is in pending state\n");
|
2017-05-04 19:31:57 +00:00
|
|
|
assert(false);
|
2016-07-20 18:16:23 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-01-11 23:38:11 +00:00
|
|
|
spdk_bdev_put_io(bdev_io);
|
|
|
|
|
|
|
|
return 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 20:47:17 +00:00
|
|
|
static void
|
|
|
|
_spdk_bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
|
|
|
|
{
|
|
|
|
struct spdk_bdev *bdev = bdev_ch->bdev;
|
|
|
|
struct spdk_bdev_io *bdev_io;
|
|
|
|
|
|
|
|
if (bdev_ch->io_outstanding > bdev_ch->nomem_threshold) {
|
|
|
|
/*
|
|
|
|
* Allow some more I/O to complete before retrying the nomem_io queue.
|
|
|
|
* Some drivers (such as nvme) cannot immediately take a new I/O in
|
|
|
|
* the context of a completion, because the resources for the I/O are
|
|
|
|
* not released until control returns to the bdev poller. Also, we
|
|
|
|
* may require several small I/O to complete before a larger I/O
|
|
|
|
* (that requires splitting) can be submitted.
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (!TAILQ_EMPTY(&bdev_ch->nomem_io)) {
|
|
|
|
bdev_io = TAILQ_FIRST(&bdev_ch->nomem_io);
|
|
|
|
TAILQ_REMOVE(&bdev_ch->nomem_io, bdev_io, link);
|
|
|
|
bdev_ch->io_outstanding++;
|
|
|
|
bdev_io->status = SPDK_BDEV_IO_STATUS_PENDING;
|
|
|
|
bdev->fn_table->submit_request(bdev_ch->channel, bdev_io);
|
|
|
|
if (bdev_io->status == SPDK_BDEV_IO_STATUS_NOMEM) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-12 17:58:20 +00:00
|
|
|
static void
|
2017-06-15 18:48:27 +00:00
|
|
|
_spdk_bdev_io_complete(void *ctx)
|
2017-01-12 17:58:20 +00:00
|
|
|
{
|
2017-06-12 22:47:30 +00:00
|
|
|
struct spdk_bdev_io *bdev_io = ctx;
|
2017-01-12 17:58:20 +00:00
|
|
|
|
2017-06-15 18:48:27 +00:00
|
|
|
assert(bdev_io->cb != NULL);
|
|
|
|
bdev_io->cb(bdev_io, bdev_io->status == SPDK_BDEV_IO_STATUS_SUCCESS, bdev_io->caller_ctx);
|
2017-01-12 17:58:20 +00:00
|
|
|
}
|
|
|
|
|
2017-12-06 21:52:20 +00:00
|
|
|
static void
|
2017-12-11 22:14:19 +00:00
|
|
|
_spdk_bdev_reset_complete(struct spdk_io_channel_iter *i, int status)
|
2017-12-06 21:52:20 +00:00
|
|
|
{
|
2017-12-11 22:14:19 +00:00
|
|
|
struct spdk_bdev_io *bdev_io = spdk_io_channel_iter_get_ctx(i);
|
2017-12-06 21:52:20 +00:00
|
|
|
|
|
|
|
if (bdev_io->u.reset.ch_ref != NULL) {
|
|
|
|
spdk_put_io_channel(bdev_io->u.reset.ch_ref);
|
|
|
|
bdev_io->u.reset.ch_ref = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
_spdk_bdev_io_complete(bdev_io);
|
|
|
|
}
|
|
|
|
|
2017-12-11 22:14:19 +00:00
|
|
|
static void
|
|
|
|
_spdk_bdev_unfreeze_channel(struct spdk_io_channel_iter *i)
|
2017-12-06 21:52:20 +00:00
|
|
|
{
|
2017-12-11 22:14:19 +00:00
|
|
|
struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
|
2017-12-06 21:52:20 +00:00
|
|
|
struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
|
|
|
|
|
|
|
|
ch->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
|
|
|
|
if (!TAILQ_EMPTY(&ch->queued_resets)) {
|
|
|
|
_spdk_bdev_channel_start_reset(ch);
|
|
|
|
}
|
|
|
|
|
2017-12-11 22:14:19 +00:00
|
|
|
spdk_for_each_channel_continue(i, 0);
|
2017-12-06 21:52:20 +00:00
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
|
|
|
|
{
|
2017-09-15 22:23:49 +00:00
|
|
|
struct spdk_bdev *bdev = bdev_io->bdev;
|
|
|
|
struct spdk_bdev_channel *bdev_ch = bdev_io->ch;
|
|
|
|
|
2017-06-12 22:47:30 +00:00
|
|
|
bdev_io->status = status;
|
|
|
|
|
2017-09-14 21:02:09 +00:00
|
|
|
if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) {
|
2017-12-06 21:52:20 +00:00
|
|
|
bool unlock_channels = false;
|
|
|
|
|
2017-09-15 20:47:17 +00:00
|
|
|
if (status == SPDK_BDEV_IO_STATUS_NOMEM) {
|
|
|
|
SPDK_ERRLOG("NOMEM returned for reset\n");
|
|
|
|
}
|
2017-09-15 22:23:49 +00:00
|
|
|
pthread_mutex_lock(&bdev->mutex);
|
|
|
|
if (bdev_io == bdev->reset_in_progress) {
|
|
|
|
bdev->reset_in_progress = NULL;
|
2017-12-06 21:52:20 +00:00
|
|
|
unlock_channels = true;
|
2017-09-12 19:14:48 +00:00
|
|
|
}
|
2017-09-15 22:23:49 +00:00
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
2017-12-06 21:52:20 +00:00
|
|
|
|
|
|
|
if (unlock_channels) {
|
|
|
|
spdk_for_each_channel(bdev, _spdk_bdev_unfreeze_channel, bdev_io,
|
|
|
|
_spdk_bdev_reset_complete);
|
|
|
|
return;
|
2017-09-12 16:34:55 +00:00
|
|
|
}
|
2017-09-14 21:02:09 +00:00
|
|
|
} else {
|
2017-09-15 22:23:49 +00:00
|
|
|
assert(bdev_ch->io_outstanding > 0);
|
|
|
|
bdev_ch->io_outstanding--;
|
2017-09-15 20:47:17 +00:00
|
|
|
if (spdk_likely(status != SPDK_BDEV_IO_STATUS_NOMEM)) {
|
|
|
|
if (spdk_unlikely(!TAILQ_EMPTY(&bdev_ch->nomem_io))) {
|
|
|
|
_spdk_bdev_ch_retry_io(bdev_ch);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
TAILQ_INSERT_HEAD(&bdev_ch->nomem_io, bdev_io, link);
|
|
|
|
/*
|
|
|
|
* Wait for some of the outstanding I/O to complete before we
|
|
|
|
* retry any of the nomem_io. Normally we will wait for
|
|
|
|
* NOMEM_THRESHOLD_COUNT I/O to complete but for low queue
|
|
|
|
* depth channels we will instead wait for half to complete.
|
|
|
|
*/
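/*
 * Worked example (assuming NOMEM_THRESHOLD_COUNT is 8, purely for
 * illustration): with 64 I/O outstanding the threshold is
 * spdk_max(32, 56) = 56, so only a few completions are needed before
 * retrying; with 8 I/O outstanding it is spdk_max(4, 0) = 4, i.e. half
 * of them must complete first.
 */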
|
|
|
|
bdev_ch->nomem_threshold = spdk_max(bdev_ch->io_outstanding / 2,
|
|
|
|
bdev_ch->io_outstanding - NOMEM_THRESHOLD_COUNT);
|
|
|
|
return;
|
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-06-12 22:47:30 +00:00
|
|
|
if (status == SPDK_BDEV_IO_STATUS_SUCCESS) {
|
2017-04-06 21:40:29 +00:00
|
|
|
switch (bdev_io->type) {
|
|
|
|
case SPDK_BDEV_IO_TYPE_READ:
|
2017-09-15 22:23:49 +00:00
|
|
|
bdev_ch->stat.bytes_read += bdev_io->u.bdev.num_blocks * bdev->blocklen;
|
|
|
|
bdev_ch->stat.num_read_ops++;
|
2017-04-06 21:40:29 +00:00
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_TYPE_WRITE:
|
2017-09-15 22:23:49 +00:00
|
|
|
bdev_ch->stat.bytes_written += bdev_io->u.bdev.num_blocks * bdev->blocklen;
|
|
|
|
bdev_ch->stat.num_write_ops++;
|
2017-04-06 21:40:29 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-30 08:45:46 +00:00
|
|
|
#ifdef SPDK_CONFIG_VTUNE
|
|
|
|
uint64_t now_tsc = spdk_get_ticks();
|
2017-09-15 22:23:49 +00:00
|
|
|
if (now_tsc > (bdev_ch->start_tsc + bdev_ch->interval_tsc)) {
|
2017-06-15 16:59:02 +00:00
|
|
|
uint64_t data[5];
|
2017-05-30 08:45:46 +00:00
|
|
|
|
2017-09-15 22:23:49 +00:00
|
|
|
data[0] = bdev_ch->stat.num_read_ops;
|
|
|
|
data[1] = bdev_ch->stat.bytes_read;
|
|
|
|
data[2] = bdev_ch->stat.num_write_ops;
|
|
|
|
data[3] = bdev_ch->stat.bytes_written;
|
|
|
|
data[4] = bdev->fn_table->get_spin_time ?
|
|
|
|
bdev->fn_table->get_spin_time(bdev_ch->channel) : 0;
|
2017-05-30 08:45:46 +00:00
|
|
|
|
2017-09-15 22:23:49 +00:00
|
|
|
__itt_metadata_add(g_bdev_mgr.domain, __itt_null, bdev_ch->handle,
|
2017-06-15 16:59:02 +00:00
|
|
|
__itt_metadata_u64, 5, data);
|
2017-05-30 08:45:46 +00:00
|
|
|
|
2017-09-15 22:23:49 +00:00
|
|
|
memset(&bdev_ch->stat, 0, sizeof(bdev_ch->stat));
|
|
|
|
bdev_ch->start_tsc = now_tsc;
|
2017-05-30 08:45:46 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-12-06 21:52:20 +00:00
|
|
|
if (bdev_io->in_submit_request) {
|
2017-06-15 18:48:27 +00:00
|
|
|
/*
|
|
|
|
* Defer completion to avoid potential infinite recursion if the
|
|
|
|
* user's completion callback issues a new I/O.
|
|
|
|
*/
|
2017-09-15 22:23:49 +00:00
|
|
|
spdk_thread_send_msg(spdk_io_channel_get_thread(bdev_ch->channel),
|
2017-06-15 18:48:27 +00:00
|
|
|
_spdk_bdev_io_complete, bdev_io);
|
|
|
|
} else {
|
|
|
|
_spdk_bdev_io_complete(bdev_io);
|
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2016-11-07 20:14:47 +00:00
|
|
|
void
|
2017-01-18 22:20:31 +00:00
|
|
|
spdk_bdev_io_complete_scsi_status(struct spdk_bdev_io *bdev_io, enum spdk_scsi_status sc,
|
|
|
|
enum spdk_scsi_sense sk, uint8_t asc, uint8_t ascq)
|
|
|
|
{
|
|
|
|
if (sc == SPDK_SCSI_STATUS_GOOD) {
|
|
|
|
bdev_io->status = SPDK_BDEV_IO_STATUS_SUCCESS;
|
|
|
|
} else {
|
|
|
|
bdev_io->status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
|
|
|
|
bdev_io->error.scsi.sc = sc;
|
|
|
|
bdev_io->error.scsi.sk = sk;
|
|
|
|
bdev_io->error.scsi.asc = asc;
|
|
|
|
bdev_io->error.scsi.ascq = ascq;
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_bdev_io_complete(bdev_io, bdev_io->status);
|
2016-11-07 20:14:47 +00:00
|
|
|
}
|
|
|
|
|
2017-01-18 22:15:35 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
|
|
|
|
int *sc, int *sk, int *asc, int *ascq)
|
|
|
|
{
|
|
|
|
assert(sc != NULL);
|
|
|
|
assert(sk != NULL);
|
|
|
|
assert(asc != NULL);
|
|
|
|
assert(ascq != NULL);
|
|
|
|
|
|
|
|
switch (bdev_io->status) {
|
|
|
|
case SPDK_BDEV_IO_STATUS_SUCCESS:
|
|
|
|
*sc = SPDK_SCSI_STATUS_GOOD;
|
|
|
|
*sk = SPDK_SCSI_SENSE_NO_SENSE;
|
|
|
|
*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
|
|
|
|
*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
|
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_STATUS_NVME_ERROR:
|
|
|
|
spdk_scsi_nvme_translate(bdev_io, sc, sk, asc, ascq);
|
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
|
|
|
|
*sc = bdev_io->error.scsi.sc;
|
|
|
|
*sk = bdev_io->error.scsi.sk;
|
|
|
|
*asc = bdev_io->error.scsi.asc;
|
|
|
|
*ascq = bdev_io->error.scsi.ascq;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
*sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
|
|
|
|
*sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
|
|
|
|
*asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
|
|
|
|
*ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-18 21:43:15 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, int sct, int sc)
|
|
|
|
{
|
|
|
|
if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
|
|
|
|
bdev_io->status = SPDK_BDEV_IO_STATUS_SUCCESS;
|
|
|
|
} else {
|
|
|
|
bdev_io->error.nvme.sct = sct;
|
|
|
|
bdev_io->error.nvme.sc = sc;
|
|
|
|
bdev_io->status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_bdev_io_complete(bdev_io, bdev_io->status);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, int *sct, int *sc)
|
|
|
|
{
|
|
|
|
assert(sct != NULL);
|
|
|
|
assert(sc != NULL);
|
|
|
|
|
|
|
|
if (bdev_io->status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
|
|
|
|
*sct = bdev_io->error.nvme.sct;
|
|
|
|
*sc = bdev_io->error.nvme.sc;
|
|
|
|
} else if (bdev_io->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
|
|
|
|
*sct = SPDK_NVME_SCT_GENERIC;
|
|
|
|
*sc = SPDK_NVME_SC_SUCCESS;
|
|
|
|
} else {
|
|
|
|
*sct = SPDK_NVME_SCT_GENERIC;
|
|
|
|
*sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-11 16:47:52 +00:00
|
|
|
struct spdk_thread *
|
|
|
|
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
|
|
|
|
{
|
|
|
|
return spdk_io_channel_get_thread(bdev_io->ch->channel);
|
|
|
|
}
|
|
|
|
|
2017-11-20 09:31:39 +00:00
|
|
|
static int
|
2017-06-29 20:16:26 +00:00
|
|
|
_spdk_bdev_register(struct spdk_bdev *bdev)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2017-07-13 04:06:22 +00:00
|
|
|
struct spdk_bdev_module_if *module;
|
2017-02-10 20:18:49 +00:00
|
|
|
|
2017-07-07 00:36:17 +00:00
|
|
|
assert(bdev->module != NULL);
|
|
|
|
|
2017-11-20 09:31:39 +00:00
|
|
|
if (!bdev->name) {
|
|
|
|
SPDK_ERRLOG("Bdev name is NULL\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (spdk_bdev_get_by_name(bdev->name)) {
|
|
|
|
SPDK_ERRLOG("Bdev name:%s already exists\n", bdev->name);
|
|
|
|
return -EEXIST;
|
|
|
|
}
|
|
|
|
|
2017-06-29 18:23:50 +00:00
|
|
|
bdev->status = SPDK_BDEV_STATUS_READY;
|
|
|
|
|
|
|
|
TAILQ_INIT(&bdev->open_descs);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-06-29 20:16:26 +00:00
|
|
|
TAILQ_INIT(&bdev->vbdevs);
|
|
|
|
TAILQ_INIT(&bdev->base_bdevs);
|
|
|
|
|
2017-09-12 19:14:48 +00:00
|
|
|
bdev->reset_in_progress = NULL;
|
2017-06-15 14:17:12 +00:00
|
|
|
|
2017-04-04 21:01:54 +00:00
|
|
|
spdk_io_device_register(bdev, spdk_bdev_channel_create, spdk_bdev_channel_destroy,
|
|
|
|
sizeof(struct spdk_bdev_channel));
|
|
|
|
|
2017-01-10 16:54:23 +00:00
|
|
|
pthread_mutex_init(&bdev->mutex, NULL);
|
2017-08-30 18:06:33 +00:00
|
|
|
SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Inserting bdev %s into list\n", bdev->name);
|
2017-05-09 21:09:28 +00:00
|
|
|
TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, link);
|
2017-02-10 20:18:49 +00:00
|
|
|
|
2017-07-13 04:06:22 +00:00
|
|
|
TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, tailq) {
|
|
|
|
if (module->examine) {
|
2017-08-24 15:36:25 +00:00
|
|
|
module->action_in_progress++;
|
2017-07-13 04:06:22 +00:00
|
|
|
module->examine(bdev);
|
|
|
|
}
|
2017-02-10 20:18:49 +00:00
|
|
|
}
|
2017-11-20 09:31:39 +00:00
|
|
|
|
|
|
|
return 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-11-20 09:31:39 +00:00
|
|
|
int
|
2017-06-29 20:16:26 +00:00
|
|
|
spdk_bdev_register(struct spdk_bdev *bdev)
|
|
|
|
{
|
2017-11-20 09:31:39 +00:00
|
|
|
return _spdk_bdev_register(bdev);
|
2017-06-29 20:16:26 +00:00
|
|
|
}
|
|
|
|
|
2017-11-20 09:31:39 +00:00
|
|
|
int
|
2017-06-29 20:16:26 +00:00
|
|
|
spdk_vbdev_register(struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs, int base_bdev_count)
|
|
|
|
{
|
2017-11-20 09:31:39 +00:00
|
|
|
int i, rc;
|
|
|
|
|
|
|
|
rc = _spdk_bdev_register(vbdev);
|
|
|
|
if (rc) {
|
|
|
|
return rc;
|
|
|
|
}
|
2017-06-29 20:16:26 +00:00
|
|
|
|
|
|
|
for (i = 0; i < base_bdev_count; i++) {
|
|
|
|
assert(base_bdevs[i] != NULL);
|
|
|
|
TAILQ_INSERT_TAIL(&vbdev->base_bdevs, base_bdevs[i], base_bdev_link);
|
|
|
|
TAILQ_INSERT_TAIL(&base_bdevs[i]->vbdevs, vbdev, vbdev_link);
|
|
|
|
}
|
2017-11-20 09:31:39 +00:00
|
|
|
|
|
|
|
return 0;
|
2017-06-29 20:16:26 +00:00
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
void
|
2017-10-25 09:11:59 +00:00
|
|
|
spdk_bdev_unregister_done(struct spdk_bdev *bdev, int bdeverrno)
|
|
|
|
{
|
|
|
|
if (bdev->unregister_cb != NULL) {
|
|
|
|
bdev->unregister_cb(bdev->unregister_ctx, bdeverrno);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2017-06-29 18:23:50 +00:00
|
|
|
struct spdk_bdev_desc *desc, *tmp;
|
2016-07-20 18:16:23 +00:00
|
|
|
int rc;
|
2017-07-24 16:43:16 +00:00
|
|
|
bool do_destruct = true;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-08-30 18:06:33 +00:00
|
|
|
SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Removing bdev %s from list\n", bdev->name);
|
2017-01-25 01:04:14 +00:00
|
|
|
|
|
|
|
pthread_mutex_lock(&bdev->mutex);
|
2017-06-29 18:23:50 +00:00
|
|
|
|
|
|
|
bdev->status = SPDK_BDEV_STATUS_REMOVING;
|
2017-10-25 09:11:59 +00:00
|
|
|
bdev->unregister_cb = cb_fn;
|
|
|
|
bdev->unregister_ctx = cb_arg;
|
2017-06-29 18:23:50 +00:00
|
|
|
|
|
|
|
TAILQ_FOREACH_SAFE(desc, &bdev->open_descs, link, tmp) {
|
|
|
|
if (desc->remove_cb) {
|
2017-01-25 01:04:14 +00:00
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
2017-07-24 16:43:16 +00:00
|
|
|
do_destruct = false;
|
2017-06-29 18:23:50 +00:00
|
|
|
desc->remove_cb(desc->remove_ctx);
|
|
|
|
pthread_mutex_lock(&bdev->mutex);
|
2017-01-25 01:04:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-24 16:43:16 +00:00
|
|
|
if (!do_destruct) {
|
2017-06-29 18:23:50 +00:00
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-05-09 21:09:28 +00:00
|
|
|
TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, link);
|
2017-01-25 01:04:14 +00:00
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-01-10 16:54:23 +00:00
|
|
|
pthread_mutex_destroy(&bdev->mutex);
|
|
|
|
|
2017-07-20 12:40:51 +00:00
|
|
|
spdk_io_device_unregister(bdev, NULL);
|
2017-04-04 21:01:54 +00:00
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
rc = bdev->fn_table->destruct(bdev->ctxt);
|
|
|
|
if (rc < 0) {
|
|
|
|
SPDK_ERRLOG("destruct failed\n");
|
|
|
|
}
|
2017-10-25 09:11:59 +00:00
|
|
|
if (rc <= 0 && cb_fn != NULL) {
|
|
|
|
cb_fn(cb_arg, rc);
|
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-06-29 20:16:26 +00:00
|
|
|
void
|
2017-10-25 09:11:59 +00:00
|
|
|
spdk_vbdev_unregister(struct spdk_bdev *vbdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
|
2017-06-29 20:16:26 +00:00
|
|
|
{
|
|
|
|
struct spdk_bdev *base_bdev;
|
|
|
|
|
|
|
|
assert(!TAILQ_EMPTY(&vbdev->base_bdevs));
|
|
|
|
TAILQ_FOREACH(base_bdev, &vbdev->base_bdevs, base_bdev_link) {
|
|
|
|
TAILQ_REMOVE(&base_bdev->vbdevs, vbdev, vbdev_link);
|
|
|
|
}
|
2017-10-25 09:11:59 +00:00
|
|
|
spdk_bdev_unregister(vbdev, cb_fn, cb_arg);
|
2017-06-29 20:16:26 +00:00
|
|
|
}
|
|
|
|
|
2017-06-29 18:23:50 +00:00
|
|
|
int
|
|
|
|
spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
|
|
|
|
void *remove_ctx, struct spdk_bdev_desc **_desc)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_desc *desc;
|
|
|
|
|
|
|
|
desc = calloc(1, sizeof(*desc));
|
|
|
|
if (desc == NULL) {
|
2017-10-30 07:47:50 +00:00
|
|
|
SPDK_ERRLOG("Failed to allocate memory for bdev descriptor\n");
|
2017-06-29 18:23:50 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2017-01-10 16:54:23 +00:00
|
|
|
|
|
|
|
pthread_mutex_lock(&bdev->mutex);
|
|
|
|
|
2017-09-18 12:15:34 +00:00
|
|
|
if (write && bdev->claim_module) {
|
2017-08-30 18:06:33 +00:00
|
|
|
SPDK_INFOLOG(SPDK_LOG_BDEV, "Could not open %s - already claimed\n", bdev->name);
|
2017-06-29 18:23:50 +00:00
|
|
|
free(desc);
|
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
|
|
|
return -EPERM;
|
2017-01-10 16:54:23 +00:00
|
|
|
}
|
|
|
|
|
2017-06-29 18:23:50 +00:00
|
|
|
TAILQ_INSERT_TAIL(&bdev->open_descs, desc, link);
|
|
|
|
|
|
|
|
desc->bdev = bdev;
|
|
|
|
desc->remove_cb = remove_cb;
|
|
|
|
desc->remove_ctx = remove_ctx;
|
|
|
|
desc->write = write;
|
|
|
|
*_desc = desc;
|
|
|
|
|
2017-01-10 16:54:23 +00:00
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
|
|
|
|
2017-06-29 18:23:50 +00:00
|
|
|
return 0;
|
2017-01-10 16:54:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-06-29 18:23:50 +00:00
|
|
|
spdk_bdev_close(struct spdk_bdev_desc *desc)
|
2017-01-10 16:54:23 +00:00
|
|
|
{
|
2017-06-29 18:23:50 +00:00
|
|
|
struct spdk_bdev *bdev = desc->bdev;
|
2017-01-25 01:04:14 +00:00
|
|
|
bool do_unregister = false;
|
2017-01-10 16:54:23 +00:00
|
|
|
|
2017-01-25 01:04:14 +00:00
|
|
|
pthread_mutex_lock(&bdev->mutex);
|
2017-06-29 18:23:50 +00:00
|
|
|
|
|
|
|
TAILQ_REMOVE(&bdev->open_descs, desc, link);
|
|
|
|
free(desc);
|
|
|
|
|
2017-07-17 10:00:08 +00:00
|
|
|
if (bdev->status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->open_descs)) {
|
2017-01-25 01:04:14 +00:00
|
|
|
do_unregister = true;
|
|
|
|
}
|
2017-01-10 16:54:23 +00:00
|
|
|
pthread_mutex_unlock(&bdev->mutex);
|
2017-01-25 01:04:14 +00:00
|
|
|
|
|
|
|
if (do_unregister == true) {
|
2017-10-25 09:11:59 +00:00
|
|
|
spdk_bdev_unregister(bdev, bdev->unregister_cb, bdev->unregister_ctx);
|
2017-01-25 01:04:14 +00:00
|
|
|
}
|
2017-01-10 16:54:23 +00:00
|
|
|
}
|
|
|
|
|
2017-07-07 23:04:52 +00:00
|
|
|
int
|
2017-07-13 04:06:22 +00:00
|
|
|
spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
|
|
|
|
struct spdk_bdev_module_if *module)
|
2017-07-07 23:04:52 +00:00
|
|
|
{
|
2017-07-13 04:06:22 +00:00
|
|
|
if (bdev->claim_module != NULL) {
|
2017-07-07 23:04:52 +00:00
|
|
|
SPDK_ERRLOG("bdev %s already claimed by module %s\n", bdev->name,
|
2017-07-13 04:06:22 +00:00
|
|
|
bdev->claim_module->name);
|
2017-07-07 23:04:52 +00:00
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (desc && !desc->write) {
|
|
|
|
desc->write = true;
|
|
|
|
}
|
|
|
|
|
2017-07-13 04:06:22 +00:00
|
|
|
bdev->claim_module = module;
|
2017-07-07 23:04:52 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-07-13 04:06:22 +00:00
|
|
|
spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
|
2017-07-07 23:04:52 +00:00
|
|
|
{
|
2017-07-13 04:06:22 +00:00
|
|
|
assert(bdev->claim_module != NULL);
|
|
|
|
bdev->claim_module = NULL;
|
2017-07-07 23:04:52 +00:00
|
|
|
}
|
|
|
|
|
2017-07-06 19:39:19 +00:00
|
|
|
struct spdk_bdev *
|
|
|
|
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
|
|
|
|
{
|
|
|
|
return desc->bdev;
|
|
|
|
}
|
|
|
|
|
2017-05-04 20:57:07 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
|
|
|
|
{
|
|
|
|
struct iovec *iovs;
|
|
|
|
int iovcnt;
|
|
|
|
|
|
|
|
if (bdev_io == NULL) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (bdev_io->type) {
|
|
|
|
case SPDK_BDEV_IO_TYPE_READ:
|
2017-09-20 13:10:17 +00:00
|
|
|
iovs = bdev_io->u.bdev.iovs;
|
|
|
|
iovcnt = bdev_io->u.bdev.iovcnt;
|
2017-05-04 20:57:07 +00:00
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_TYPE_WRITE:
|
2017-09-20 13:10:17 +00:00
|
|
|
iovs = bdev_io->u.bdev.iovs;
|
|
|
|
iovcnt = bdev_io->u.bdev.iovcnt;
|
2017-05-04 20:57:07 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
iovs = NULL;
|
|
|
|
iovcnt = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (iovp) {
|
|
|
|
*iovp = iovs;
|
|
|
|
}
|
|
|
|
if (iovcntp) {
|
|
|
|
*iovcntp = iovcnt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-09 21:01:12 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_module_list_add(struct spdk_bdev_module_if *bdev_module)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2017-07-13 04:06:22 +00:00
|
|
|
/*
|
|
|
|
* Modules with examine callbacks must be initialized first, so they are
|
|
|
|
* ready to handle examine callbacks from later modules that will
|
|
|
|
* register physical bdevs.
|
|
|
|
*/
|
|
|
|
if (bdev_module->examine != NULL) {
|
|
|
|
TAILQ_INSERT_HEAD(&g_bdev_mgr.bdev_modules, bdev_module, tailq);
|
|
|
|
} else {
|
|
|
|
TAILQ_INSERT_TAIL(&g_bdev_mgr.bdev_modules, bdev_module, tailq);
|
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
2017-08-25 21:22:46 +00:00
|
|
|
|
2017-08-29 05:24:52 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_part_base_free(struct spdk_bdev_part_base *base)
|
|
|
|
{
|
2017-09-14 18:34:36 +00:00
|
|
|
if (base->desc) {
|
|
|
|
spdk_bdev_close(base->desc);
|
|
|
|
base->desc = NULL;
|
|
|
|
}
|
|
|
|
base->base_free_fn(base);
|
2017-08-29 05:24:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
spdk_bdev_part_free(struct spdk_bdev_part *part)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part_base *base;
|
|
|
|
|
|
|
|
assert(part);
|
|
|
|
assert(part->base);
|
|
|
|
|
|
|
|
base = part->base;
|
|
|
|
spdk_io_device_unregister(&part->base, NULL);
|
|
|
|
TAILQ_REMOVE(base->tailq, part, tailq);
|
|
|
|
free(part->bdev.name);
|
2017-09-11 21:43:33 +00:00
|
|
|
free(part);
|
2017-08-29 05:24:52 +00:00
|
|
|
|
|
|
|
if (__sync_sub_and_fetch(&base->ref, 1) == 0) {
|
2017-09-14 14:35:23 +00:00
|
|
|
spdk_bdev_module_release_bdev(base->bdev);
|
2017-08-29 05:24:52 +00:00
|
|
|
spdk_bdev_part_base_free(base);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
spdk_bdev_part_tailq_fini(struct bdev_part_tailq *tailq)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part *part, *tmp;
|
|
|
|
|
|
|
|
TAILQ_FOREACH_SAFE(part, tailq, tailq, tmp) {
|
|
|
|
spdk_bdev_part_free(part);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
spdk_bdev_part_base_hotremove(struct spdk_bdev *base_bdev, struct bdev_part_tailq *tailq)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part *part, *tmp;
|
|
|
|
|
|
|
|
TAILQ_FOREACH_SAFE(part, tailq, tailq, tmp) {
|
|
|
|
if (part->base->bdev == base_bdev) {
|
2017-10-25 09:11:59 +00:00
|
|
|
spdk_vbdev_unregister(&part->bdev, NULL, NULL);
|
2017-08-29 05:24:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
spdk_bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part *part = _part;
|
|
|
|
|
|
|
|
return part->base->bdev->fn_table->io_type_supported(part->base->bdev, io_type);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct spdk_io_channel *
|
|
|
|
spdk_bdev_part_get_io_channel(void *_part)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part *part = _part;
|
|
|
|
|
|
|
|
return spdk_get_io_channel(&part->base);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
spdk_bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_io *part_io = cb_arg;
|
|
|
|
int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
|
|
|
|
|
|
|
|
spdk_bdev_io_complete(part_io, status);
|
|
|
|
spdk_bdev_free_io(bdev_io);
|
|
|
|
}
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
static void
|
|
|
|
spdk_bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
|
|
|
|
{
|
|
|
|
uint64_t len;
|
|
|
|
|
|
|
|
if (!success) {
|
2017-10-23 20:43:50 +00:00
|
|
|
bdev_io->cb = bdev_io->stored_user_cb;
|
|
|
|
_spdk_bdev_io_complete(bdev_io);
|
2017-07-28 22:34:24 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* no need to perform the error checking from write_zeroes_blocks because this request already passed those checks. */
|
|
|
|
len = spdk_min(spdk_bdev_get_block_size(bdev_io->bdev) * bdev_io->split_remaining_num_blocks,
|
|
|
|
ZERO_BUFFER_SIZE);
|
|
|
|
|
|
|
|
bdev_io->u.bdev.offset_blocks = bdev_io->split_current_offset_blocks;
|
|
|
|
bdev_io->u.bdev.iov.iov_len = len;
|
|
|
|
bdev_io->u.bdev.num_blocks = len / spdk_bdev_get_block_size(bdev_io->bdev);
|
|
|
|
bdev_io->split_remaining_num_blocks -= bdev_io->u.bdev.num_blocks;
|
|
|
|
bdev_io->split_current_offset_blocks += bdev_io->u.bdev.num_blocks;
|
|
|
|
|
|
|
|
/* if this round completes the i/o, change the callback to be the original user callback */
|
|
|
|
if (bdev_io->split_remaining_num_blocks == 0) {
|
|
|
|
spdk_bdev_io_init(bdev_io, bdev_io->bdev, cb_arg, bdev_io->stored_user_cb);
|
|
|
|
} else {
|
|
|
|
spdk_bdev_io_init(bdev_io, bdev_io->bdev, cb_arg, spdk_bdev_write_zeroes_split);
|
|
|
|
}
|
|
|
|
spdk_bdev_io_submit(bdev_io);
|
|
|
|
}
|
|
|
|
|
2017-08-29 05:24:52 +00:00
|
|
|
void
|
|
|
|
spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part *part = ch->part;
|
|
|
|
struct spdk_io_channel *base_ch = ch->base_ch;
|
|
|
|
struct spdk_bdev_desc *base_desc = part->base->desc;
|
|
|
|
uint64_t offset;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
/* Modify the I/O to adjust for the offset within the base bdev. */
|
|
|
|
switch (bdev_io->type) {
|
|
|
|
case SPDK_BDEV_IO_TYPE_READ:
|
2017-09-20 13:10:17 +00:00
|
|
|
offset = bdev_io->u.bdev.offset_blocks + part->offset_blocks;
|
|
|
|
rc = spdk_bdev_readv_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
|
|
|
|
bdev_io->u.bdev.iovcnt, offset,
|
|
|
|
bdev_io->u.bdev.num_blocks, spdk_bdev_part_complete_io,
|
2017-08-29 05:24:52 +00:00
|
|
|
bdev_io);
|
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_TYPE_WRITE:
|
2017-09-20 13:10:17 +00:00
|
|
|
offset = bdev_io->u.bdev.offset_blocks + part->offset_blocks;
|
|
|
|
rc = spdk_bdev_writev_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
|
|
|
|
bdev_io->u.bdev.iovcnt, offset,
|
|
|
|
bdev_io->u.bdev.num_blocks, spdk_bdev_part_complete_io,
|
2017-08-29 05:24:52 +00:00
|
|
|
bdev_io);
|
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
|
2017-09-20 13:10:17 +00:00
|
|
|
offset = bdev_io->u.bdev.offset_blocks + part->offset_blocks;
|
|
|
|
rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, offset, bdev_io->u.bdev.num_blocks,
|
2017-08-29 05:24:52 +00:00
|
|
|
spdk_bdev_part_complete_io, bdev_io);
|
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_TYPE_UNMAP:
|
2017-09-20 13:10:17 +00:00
|
|
|
offset = bdev_io->u.bdev.offset_blocks + part->offset_blocks;
|
|
|
|
rc = spdk_bdev_unmap_blocks(base_desc, base_ch, offset, bdev_io->u.bdev.num_blocks,
|
2017-08-29 05:24:52 +00:00
|
|
|
spdk_bdev_part_complete_io, bdev_io);
|
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_TYPE_FLUSH:
|
2017-09-20 13:10:17 +00:00
|
|
|
offset = bdev_io->u.bdev.offset_blocks + part->offset_blocks;
|
|
|
|
rc = spdk_bdev_flush_blocks(base_desc, base_ch, offset, bdev_io->u.bdev.num_blocks,
|
2017-08-29 05:24:52 +00:00
|
|
|
spdk_bdev_part_complete_io, bdev_io);
|
|
|
|
break;
|
|
|
|
case SPDK_BDEV_IO_TYPE_RESET:
|
|
|
|
rc = spdk_bdev_reset(base_desc, base_ch,
|
|
|
|
spdk_bdev_part_complete_io, bdev_io);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
SPDK_ERRLOG("split: unknown I/O type %d\n", bdev_io->type);
|
|
|
|
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rc != 0) {
|
|
|
|
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
static int
|
|
|
|
spdk_bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part *part = SPDK_CONTAINEROF(io_device, struct spdk_bdev_part, base);
|
|
|
|
struct spdk_bdev_part_channel *ch = ctx_buf;
|
|
|
|
|
|
|
|
ch->part = part;
|
|
|
|
ch->base_ch = spdk_bdev_get_io_channel(part->base->desc);
|
|
|
|
if (ch->base_ch == NULL) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (part->base->ch_create_cb) {
|
|
|
|
return part->base->ch_create_cb(io_device, ctx_buf);
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
spdk_bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_part *part = SPDK_CONTAINEROF(io_device, struct spdk_bdev_part, base);
|
|
|
|
struct spdk_bdev_part_channel *ch = ctx_buf;
|
|
|
|
|
|
|
|
if (part->base->ch_destroy_cb) {
|
|
|
|
part->base->ch_destroy_cb(io_device, ctx_buf);
|
|
|
|
}
|
|
|
|
spdk_put_io_channel(ch->base_ch);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_bdev_part_base_construct(struct spdk_bdev_part_base *base, struct spdk_bdev *bdev,
|
|
|
|
spdk_bdev_remove_cb_t remove_cb, struct spdk_bdev_module_if *module,
|
|
|
|
struct spdk_bdev_fn_table *fn_table, struct bdev_part_tailq *tailq,
|
2017-09-14 18:34:36 +00:00
|
|
|
spdk_bdev_part_base_free_fn free_fn,
|
2017-08-29 05:24:52 +00:00
|
|
|
uint32_t channel_size, spdk_io_channel_create_cb ch_create_cb,
|
|
|
|
spdk_io_channel_destroy_cb ch_destroy_cb)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
fn_table->get_io_channel = spdk_bdev_part_get_io_channel;
|
|
|
|
fn_table->io_type_supported = spdk_bdev_part_io_type_supported;
|
|
|
|
|
|
|
|
base->bdev = bdev;
|
2017-09-14 18:34:36 +00:00
|
|
|
base->desc = NULL;
|
2017-08-29 05:24:52 +00:00
|
|
|
base->ref = 0;
|
|
|
|
base->module = module;
|
|
|
|
base->fn_table = fn_table;
|
|
|
|
base->tailq = tailq;
|
|
|
|
base->claimed = false;
|
|
|
|
base->channel_size = channel_size;
|
|
|
|
base->ch_create_cb = ch_create_cb;
|
|
|
|
base->ch_destroy_cb = ch_destroy_cb;
|
2017-09-14 18:34:36 +00:00
|
|
|
base->base_free_fn = free_fn;
|
2017-08-29 05:24:52 +00:00
|
|
|
|
|
|
|
rc = spdk_bdev_open(bdev, false, remove_cb, bdev, &base->desc);
|
|
|
|
if (rc) {
|
2017-09-14 18:34:36 +00:00
|
|
|
spdk_bdev_part_base_free(base);
|
2017-08-29 05:24:52 +00:00
|
|
|
SPDK_ERRLOG("could not open bdev %s\n", spdk_bdev_get_name(bdev));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_bdev_part_construct(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
|
|
|
|
char *name, uint64_t offset_blocks, uint64_t num_blocks,
|
|
|
|
char *product_name)
|
|
|
|
{
|
|
|
|
part->bdev.name = name;
|
|
|
|
part->bdev.blocklen = base->bdev->blocklen;
|
|
|
|
part->bdev.blockcnt = num_blocks;
|
|
|
|
part->offset_blocks = offset_blocks;
|
|
|
|
|
|
|
|
part->bdev.write_cache = base->bdev->write_cache;
|
|
|
|
part->bdev.need_aligned_buffer = base->bdev->need_aligned_buffer;
|
|
|
|
part->bdev.product_name = product_name;
|
|
|
|
part->bdev.ctxt = part;
|
|
|
|
part->bdev.module = base->module;
|
|
|
|
part->bdev.fn_table = base->fn_table;
|
|
|
|
|
|
|
|
__sync_fetch_and_add(&base->ref, 1);
|
|
|
|
part->base = base;
|
|
|
|
|
|
|
|
if (!base->claimed) {
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = spdk_bdev_module_claim_bdev(base->bdev, base->desc, base->module);
|
|
|
|
if (rc) {
|
|
|
|
SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(base->bdev));
|
|
|
|
free(part->bdev.name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
base->claimed = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_io_device_register(&part->base, spdk_bdev_part_channel_create_cb,
|
|
|
|
spdk_bdev_part_channel_destroy_cb,
|
|
|
|
base->channel_size);
|
|
|
|
spdk_vbdev_register(&part->bdev, &base->bdev, 1);
|
|
|
|
TAILQ_INSERT_TAIL(base->tailq, part, tailq);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-30 18:06:33 +00:00
|
|
|
SPDK_LOG_REGISTER_COMPONENT("bdev", SPDK_LOG_BDEV)
|