bdev: encapsulate private members of spdk_bdev

Change-Id: Ica5abcfe5f9b73217e2d91c33c2cd418e061cf96
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/416458
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Seth Howell 2018-06-21 13:03:02 -07:00 committed by Ben Walker
parent 7f86c35e11
commit 86947c8917
5 changed files with 189 additions and 184 deletions
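
The change applies to struct spdk_bdev the same pattern that struct spdk_bdev_io already uses for __bdev_io_internal_fields: every field that only the bdev library should touch moves into a nested struct named "internal", so private state sits behind one clearly labeled member and each internal access reads as bdev->internal.<field>. A minimal sketch of the pattern, with struct and field names abbreviated for illustration (the real layout is in the header diff below):

	/* Before: library-private state sits beside the public fields, and
	 * nothing flags a bdev module that reaches into it. (Sketch only.) */
	struct bdev_before {
		uint64_t blockcnt;            /* public */
		struct spdk_bdev_qos *qos;    /* private, but exposed */
	};

	/* After: private state lives in a nested struct whose comment forbids
	 * module access; the library now writes bdev->internal.qos. */
	struct bdev_after {
		uint64_t blockcnt;            /* still public */
		struct __internal_fields {
			struct spdk_bdev_qos *qos;
		} internal;
	};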

File 1 of 5

@@ -219,12 +219,6 @@ struct spdk_bdev {
 	/** Number of blocks */
 	uint64_t blockcnt;
 
-	/** Quality of service parameters */
-	struct spdk_bdev_qos *qos;
-
-	/** True if the state of the QoS is being modified */
-	bool qos_mod_in_progress;
-
 	/** write cache enabled, not used at the moment */
 	int write_cache;
 
@@ -254,40 +248,50 @@ struct spdk_bdev {
 	/** function table for all LUN ops */
 	const struct spdk_bdev_fn_table *fn_table;
 
-	/** Mutex protecting claimed */
-	pthread_mutex_t mutex;
-
-	/** The bdev status */
-	enum spdk_bdev_status status;
-
-	/** The array of block devices that this block device is built on top of (if any). */
-	struct spdk_bdev **base_bdevs;
-	size_t base_bdevs_cnt;
-
 	/** The array of virtual block devices built on top of this block device. */
 	struct spdk_bdev **vbdevs;
 	size_t vbdevs_cnt;
 
-	/**
-	 * Pointer to the module that has claimed this bdev for purposes of creating virtual
-	 * bdevs on top of it. Set to NULL if the bdev has not been claimed.
+	/** Fields that are used internally by the bdev subsystem. Bdev modules
+	 *  must not read or write to these fields.
 	 */
-	struct spdk_bdev_module *claim_module;
-
-	/** Callback function that will be called after bdev destruct is completed. */
-	spdk_bdev_unregister_cb unregister_cb;
-
-	/** Unregister call context */
-	void *unregister_ctx;
-
-	/** List of open descriptors for this block device. */
-	TAILQ_HEAD(, spdk_bdev_desc) open_descs;
-
-	TAILQ_ENTRY(spdk_bdev) link;
-
-	/** points to a reset bdev_io if one is in progress. */
-	struct spdk_bdev_io *reset_in_progress;
+	struct __bdev_internal_fields {
+		/** Quality of service parameters */
+		struct spdk_bdev_qos *qos;
+
+		/** True if the state of the QoS is being modified */
+		bool qos_mod_in_progress;
+
+		/** Mutex protecting claimed */
+		pthread_mutex_t mutex;
+
+		/** The bdev status */
+		enum spdk_bdev_status status;
+
+		/** The array of block devices that this block device is built on top of (if any). */
+		struct spdk_bdev **base_bdevs;
+		size_t base_bdevs_cnt;
+
+		/**
+		 * Pointer to the module that has claimed this bdev for purposes of creating virtual
+		 * bdevs on top of it. Set to NULL if the bdev has not been claimed.
+		 */
+		struct spdk_bdev_module *claim_module;
+
+		/** Callback function that will be called after bdev destruct is completed. */
+		spdk_bdev_unregister_cb unregister_cb;
+
+		/** Unregister call context */
+		void *unregister_ctx;
+
+		/** List of open descriptors for this block device. */
+		TAILQ_HEAD(, spdk_bdev_desc) open_descs;
+
+		TAILQ_ENTRY(spdk_bdev) link;
+
+		/** points to a reset bdev_io if one is in progress. */
+		struct spdk_bdev_io *reset_in_progress;
+	} internal;
 };
 
 typedef void (*spdk_bdev_io_get_buf_cb)(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
@@ -350,7 +354,8 @@ struct spdk_bdev_io {
 	/** It may be used by modules to put the bdev_io into its own list. */
 	TAILQ_ENTRY(spdk_bdev_io) module_link;
 
-	/** Fields that are used internally by the bdev subsystem. Bdev modules
+	/**
+	 * Fields that are used internally by the bdev subsystem. Bdev modules
 	 * must not read or write to these fields.
 	 */
 	struct __bdev_io_internal_fields {
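
With the fields tucked away, a bdev module reaches this state only through the existing public API, which the rest of the commit leaves unchanged. A short sketch of the module-facing calls (the module variable g_my_module and the surrounding error handling are illustrative, not part of this diff):

	/* Claim the bdev via the API instead of writing bdev->internal.claim_module. */
	if (spdk_bdev_module_claim_bdev(bdev, desc, &g_my_module) == -EPERM) {
		/* Some other module already claimed this bdev. */
	}

	/* Read the QoS rate limit via the getter instead of bdev->internal.qos. */
	uint64_t iops = spdk_bdev_get_qos_ios_per_sec(bdev);

	/* Drop the claim when the module is done with the bdev. */
	spdk_bdev_module_release_bdev(bdev);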

File 2 of 5

@@ -304,7 +304,7 @@ spdk_bdev_next(struct spdk_bdev *prev)
 {
 	struct spdk_bdev *bdev;
 
-	bdev = TAILQ_NEXT(prev, link);
+	bdev = TAILQ_NEXT(prev, internal.link);
 	if (bdev) {
 		SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Continuing bdev iteration at %s\n", bdev->name);
 	}
@@ -316,10 +316,10 @@ static struct spdk_bdev *
 _bdev_next_leaf(struct spdk_bdev *bdev)
 {
 	while (bdev != NULL) {
-		if (bdev->claim_module == NULL) {
+		if (bdev->internal.claim_module == NULL) {
 			return bdev;
 		} else {
-			bdev = TAILQ_NEXT(bdev, link);
+			bdev = TAILQ_NEXT(bdev, internal.link);
 		}
 	}
@@ -345,7 +345,7 @@ spdk_bdev_next_leaf(struct spdk_bdev *prev)
 {
 	struct spdk_bdev *bdev;
 
-	bdev = _bdev_next_leaf(TAILQ_NEXT(prev, link));
+	bdev = _bdev_next_leaf(TAILQ_NEXT(prev, internal.link));
 	if (bdev) {
 		SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Continuing bdev iteration at %s\n", bdev->name);
@@ -512,7 +512,7 @@ spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
 		}
 	}
 
-	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, link) {
+	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
 		spdk_bdev_config_json(bdev, w);
 	}
@@ -874,7 +874,7 @@ _spdk_bdev_finish_unregister_bdevs_iter(void *cb_arg, int bdeverrno)
 		 * bdev; try to continue by manually removing this bdev from the list and continue
 		 * with the next bdev in the list.
 		 */
-		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, link);
+		TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
 	}
 
 	if (TAILQ_EMPTY(&g_bdev_mgr.bdevs)) {
@@ -993,7 +993,7 @@ _spdk_bdev_qos_io_submit(struct spdk_bdev_channel *ch)
 {
 	struct spdk_bdev_io *bdev_io = NULL;
 	struct spdk_bdev *bdev = ch->bdev;
-	struct spdk_bdev_qos *qos = bdev->qos;
+	struct spdk_bdev_qos *qos = bdev->internal.qos;
 	struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
 
 	while (!TAILQ_EMPTY(&qos->queued)) {
@@ -1043,7 +1043,7 @@ _spdk_bdev_io_submit(void *ctx)
 	} else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
 		bdev_ch->io_outstanding--;
 		shared_resource->io_outstanding--;
-		TAILQ_INSERT_TAIL(&bdev->qos->queued, bdev_io, internal.link);
+		TAILQ_INSERT_TAIL(&bdev->internal.qos->queued, bdev_io, internal.link);
 		_spdk_bdev_qos_io_submit(bdev_ch);
 	} else {
 		SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
@@ -1061,12 +1061,12 @@ spdk_bdev_io_submit(struct spdk_bdev_io *bdev_io)
 	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
 
 	if (bdev_io->internal.ch->flags & BDEV_CH_QOS_ENABLED) {
-		if (thread == bdev->qos->thread) {
+		if (thread == bdev->internal.qos->thread) {
 			_spdk_bdev_io_submit(bdev_io);
 		} else {
 			bdev_io->internal.io_submit_ch = bdev_io->internal.ch;
-			bdev_io->internal.ch = bdev->qos->ch;
-			spdk_thread_send_msg(bdev->qos->thread, _spdk_bdev_io_submit, bdev_io);
+			bdev_io->internal.ch = bdev->internal.qos->ch;
+			spdk_thread_send_msg(bdev->internal.qos->thread, _spdk_bdev_io_submit, bdev_io);
 		}
 	} else {
 		_spdk_bdev_io_submit(bdev_io);
@@ -1195,11 +1195,11 @@ _spdk_bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
 	}
 }
 
-/* Caller must hold bdev->mutex. */
+/* Caller must hold bdev->internal.mutex. */
 static int
 _spdk_bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
 {
-	struct spdk_bdev_qos *qos = bdev->qos;
+	struct spdk_bdev_qos *qos = bdev->internal.qos;
 
 	/* Rate limiting on this bdev enabled */
 	if (qos) {
@@ -1302,15 +1302,15 @@ spdk_bdev_channel_create(void *io_device, void *ctx_buf)
 	}
 #endif
 
-	pthread_mutex_lock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
 	if (_spdk_bdev_enable_qos(bdev, ch)) {
 		_spdk_bdev_channel_destroy_resource(ch);
-		pthread_mutex_unlock(&bdev->mutex);
+		pthread_mutex_unlock(&bdev->internal.mutex);
 		return -1;
 	}
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	return 0;
 }
@@ -1395,7 +1395,7 @@ spdk_bdev_qos_destroy(struct spdk_bdev *bdev)
 	 */
 	struct spdk_bdev_qos *new_qos, *old_qos;
 
-	old_qos = bdev->qos;
+	old_qos = bdev->internal.qos;
 
 	new_qos = calloc(1, sizeof(*new_qos));
 	if (!new_qos) {
@@ -1416,7 +1416,7 @@ spdk_bdev_qos_destroy(struct spdk_bdev *bdev)
 	new_qos->poller = NULL;
 	TAILQ_INIT(&new_qos->queued);
 
-	bdev->qos = new_qos;
+	bdev->internal.qos = new_qos;
 
 	spdk_thread_send_msg(old_qos->thread, spdk_bdev_qos_channel_destroy,
 			     old_qos);
@@ -1541,11 +1541,11 @@ spdk_bdev_get_qos_ios_per_sec(struct spdk_bdev *bdev)
 {
 	uint64_t iops_rate_limit = 0;
 
-	pthread_mutex_lock(&bdev->mutex);
-	if (bdev->qos) {
-		iops_rate_limit = bdev->qos->iops_rate_limit;
+	pthread_mutex_lock(&bdev->internal.mutex);
+	if (bdev->internal.qos) {
+		iops_rate_limit = bdev->internal.qos->iops_rate_limit;
 	}
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	return iops_rate_limit;
 }
@@ -1584,10 +1584,10 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
 {
 	int ret;
 
-	pthread_mutex_lock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
 
 	/* bdev has open descriptors */
-	if (!TAILQ_EMPTY(&bdev->open_descs) &&
+	if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
 	    bdev->blockcnt > size) {
 		ret = -EBUSY;
 	} else {
@@ -1595,7 +1595,7 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
 		ret = 0;
 	}
 
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	return ret;
 }
@@ -2040,11 +2040,11 @@ _spdk_bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
 		 * the channel flag is set, so the lock here should not
		 * be necessary. We're not in the fast path though, so
 		 * just take it anyway. */
-		pthread_mutex_lock(&channel->bdev->mutex);
-		if (channel->bdev->qos->ch == channel) {
-			TAILQ_SWAP(&channel->bdev->qos->queued, &tmp_queued, spdk_bdev_io, internal.link);
+		pthread_mutex_lock(&channel->bdev->internal.mutex);
+		if (channel->bdev->internal.qos->ch == channel) {
+			TAILQ_SWAP(&channel->bdev->internal.qos->queued, &tmp_queued, spdk_bdev_io, internal.link);
 		}
-		pthread_mutex_unlock(&channel->bdev->mutex);
+		pthread_mutex_unlock(&channel->bdev->internal.mutex);
 	}
 
 	_spdk_bdev_abort_queued_io(&shared_resource->nomem_io, channel);
@@ -2071,9 +2071,9 @@ _spdk_bdev_channel_start_reset(struct spdk_bdev_channel *ch)
 	assert(!TAILQ_EMPTY(&ch->queued_resets));
 
-	pthread_mutex_lock(&bdev->mutex);
-	if (bdev->reset_in_progress == NULL) {
-		bdev->reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
+	pthread_mutex_lock(&bdev->internal.mutex);
+	if (bdev->internal.reset_in_progress == NULL) {
+		bdev->internal.reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
 		/*
 		 * Take a channel reference for the target bdev for the life of this
 		 * reset. This guards against the channel getting destroyed while
@@ -2081,10 +2081,10 @@ _spdk_bdev_channel_start_reset(struct spdk_bdev_channel *ch)
 		 * progress. We will release the reference when this reset is
 		 * completed.
 		 */
-		bdev->reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
+		bdev->internal.reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
 		_spdk_bdev_start_reset(ch);
 	}
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 }
 
 int
@@ -2105,9 +2105,9 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
 	bdev_io->u.reset.ch_ref = NULL;
 	spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
-	pthread_mutex_lock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
 	TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, internal.link);
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	_spdk_bdev_channel_start_reset(channel);
@@ -2445,12 +2445,12 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
 		if (status == SPDK_BDEV_IO_STATUS_NOMEM) {
 			SPDK_ERRLOG("NOMEM returned for reset\n");
 		}
-		pthread_mutex_lock(&bdev->mutex);
-		if (bdev_io == bdev->reset_in_progress) {
-			bdev->reset_in_progress = NULL;
+		pthread_mutex_lock(&bdev->internal.mutex);
+		if (bdev_io == bdev->internal.reset_in_progress) {
+			bdev->internal.reset_in_progress = NULL;
 			unlock_channels = true;
 		}
-		pthread_mutex_unlock(&bdev->mutex);
+		pthread_mutex_unlock(&bdev->internal.mutex);
 
 		if (unlock_channels) {
 			spdk_for_each_channel(__bdev_to_io_dev(bdev), _spdk_bdev_unfreeze_channel,
@@ -2598,9 +2598,9 @@ _spdk_bdev_qos_config_type(struct spdk_bdev *bdev, uint64_t qos_set,
 		return;
 	}
 
-	if (!bdev->qos) {
-		bdev->qos = calloc(1, sizeof(*bdev->qos));
-		if (!bdev->qos) {
+	if (!bdev->internal.qos) {
+		bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+		if (!bdev->internal.qos) {
 			SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
 			return;
 		}
@@ -2608,10 +2608,10 @@ _spdk_bdev_qos_config_type(struct spdk_bdev *bdev, uint64_t qos_set,
 	switch (qos_type) {
 	case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
-		bdev->qos->iops_rate_limit = qos_set;
+		bdev->internal.qos->iops_rate_limit = qos_set;
 		break;
 	case SPDK_BDEV_QOS_RW_BYTEPS_RATE_LIMIT:
-		bdev->qos->byte_rate_limit = qos_set * 1024 * 1024;
+		bdev->internal.qos->byte_rate_limit = qos_set * 1024 * 1024;
 		break;
 	default:
 		break;
@@ -2679,13 +2679,13 @@ spdk_bdev_init(struct spdk_bdev *bdev)
 		return -EEXIST;
 	}
 
-	bdev->status = SPDK_BDEV_STATUS_READY;
+	bdev->internal.status = SPDK_BDEV_STATUS_READY;
 
-	TAILQ_INIT(&bdev->open_descs);
+	TAILQ_INIT(&bdev->internal.open_descs);
 	TAILQ_INIT(&bdev->aliases);
 
-	bdev->reset_in_progress = NULL;
+	bdev->internal.reset_in_progress = NULL;
 
 	_spdk_bdev_qos_config(bdev);
@@ -2693,7 +2693,7 @@ spdk_bdev_init(struct spdk_bdev *bdev)
 				spdk_bdev_channel_create, spdk_bdev_channel_destroy,
 				sizeof(struct spdk_bdev_channel));
 
-	pthread_mutex_init(&bdev->mutex, NULL);
+	pthread_mutex_init(&bdev->internal.mutex, NULL);
 	return 0;
 }
@@ -2706,8 +2706,8 @@ spdk_bdev_destroy_cb(void *io_device)
 	void *cb_arg;
 
 	bdev = __bdev_from_io_dev(io_device);
-	cb_fn = bdev->unregister_cb;
-	cb_arg = bdev->unregister_ctx;
+	cb_fn = bdev->internal.unregister_cb;
+	cb_arg = bdev->internal.unregister_ctx;
 
 	rc = bdev->fn_table->destruct(bdev->ctxt);
 	if (rc < 0) {
@@ -2722,9 +2722,9 @@ spdk_bdev_destroy_cb(void *io_device)
 static void
 spdk_bdev_fini(struct spdk_bdev *bdev)
 {
-	pthread_mutex_destroy(&bdev->mutex);
+	pthread_mutex_destroy(&bdev->internal.mutex);
 
-	free(bdev->qos);
+	free(bdev->internal.qos);
 
 	spdk_io_device_unregister(__bdev_to_io_dev(bdev), spdk_bdev_destroy_cb);
 }
@@ -2735,7 +2735,7 @@ spdk_bdev_start(struct spdk_bdev *bdev)
 	struct spdk_bdev_module *module;
 
 	SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Inserting bdev %s into list\n", bdev->name);
-	TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, link);
+	TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
 
 	TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, tailq) {
 		if (module->examine) {
@@ -2766,9 +2766,9 @@ spdk_vbdev_remove_base_bdevs(struct spdk_bdev *vbdev)
 	bool found;
 
 	/* Iterate over base bdevs to remove vbdev from them. */
-	for (i = 0; i < vbdev->base_bdevs_cnt; i++) {
+	for (i = 0; i < vbdev->internal.base_bdevs_cnt; i++) {
 		found = false;
-		base = vbdev->base_bdevs[i];
+		base = vbdev->internal.base_bdevs[i];
 
 		for (j = 0; j < base->vbdevs_cnt; j++) {
 			if (base->vbdevs[j] != vbdev) {
@@ -2799,9 +2799,9 @@ spdk_vbdev_remove_base_bdevs(struct spdk_bdev *vbdev)
 		}
 	}
 
-	free(vbdev->base_bdevs);
-	vbdev->base_bdevs = NULL;
-	vbdev->base_bdevs_cnt = 0;
+	free(vbdev->internal.base_bdevs);
+	vbdev->internal.base_bdevs = NULL;
+	vbdev->internal.base_bdevs_cnt = 0;
 }
 
 static int
@@ -2812,23 +2812,23 @@ spdk_vbdev_set_base_bdevs(struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs
 	size_t i;
 
 	/* Adding base bdevs isn't supported (yet?). */
-	assert(vbdev->base_bdevs_cnt == 0);
+	assert(vbdev->internal.base_bdevs_cnt == 0);
 
-	vbdev->base_bdevs = malloc(cnt * sizeof(vbdev->base_bdevs[0]));
-	if (!vbdev->base_bdevs) {
+	vbdev->internal.base_bdevs = malloc(cnt * sizeof(vbdev->internal.base_bdevs[0]));
+	if (!vbdev->internal.base_bdevs) {
 		SPDK_ERRLOG("%s - realloc() failed\n", vbdev->name);
 		return -ENOMEM;
 	}
 
-	memcpy(vbdev->base_bdevs, base_bdevs, cnt * sizeof(vbdev->base_bdevs[0]));
-	vbdev->base_bdevs_cnt = cnt;
+	memcpy(vbdev->internal.base_bdevs, base_bdevs, cnt * sizeof(vbdev->internal.base_bdevs[0]));
+	vbdev->internal.base_bdevs_cnt = cnt;
 
 	/* Iterate over base bdevs to add this vbdev to them. */
 	for (i = 0; i < cnt; i++) {
-		base = vbdev->base_bdevs[i];
+		base = vbdev->internal.base_bdevs[i];
 
 		assert(base != NULL);
-		assert(base->claim_module != NULL);
+		assert(base->internal.claim_module != NULL);
 
 		vbdevs = realloc(base->vbdevs, (base->vbdevs_cnt + 1) * sizeof(vbdevs[0]));
 		if (!vbdevs) {
@@ -2874,8 +2874,8 @@ spdk_vbdev_register(struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs, int
 void
 spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
 {
-	if (bdev->unregister_cb != NULL) {
-		bdev->unregister_cb(bdev->unregister_ctx, bdeverrno);
+	if (bdev->internal.unregister_cb != NULL) {
+		bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
 	}
 }
@@ -2903,15 +2903,15 @@ spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void
 		return;
 	}
 
-	pthread_mutex_lock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
 
 	spdk_vbdev_remove_base_bdevs(bdev);
 
-	bdev->status = SPDK_BDEV_STATUS_REMOVING;
-	bdev->unregister_cb = cb_fn;
-	bdev->unregister_ctx = cb_arg;
+	bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
+	bdev->internal.unregister_cb = cb_fn;
+	bdev->internal.unregister_ctx = cb_arg;
 
-	TAILQ_FOREACH_SAFE(desc, &bdev->open_descs, link, tmp) {
+	TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
 		if (desc->remove_cb) {
 			do_destruct = false;
 			/*
@@ -2929,12 +2929,12 @@ spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void
 	}
 
 	if (!do_destruct) {
-		pthread_mutex_unlock(&bdev->mutex);
+		pthread_mutex_unlock(&bdev->internal.mutex);
 		return;
 	}
 
-	TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, link);
-	pthread_mutex_unlock(&bdev->mutex);
+	TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	spdk_bdev_fini(bdev);
 }
@@ -2954,16 +2954,16 @@ spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_
 	SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Opening descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
 		      spdk_get_thread());
 
-	pthread_mutex_lock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
 
-	if (write && bdev->claim_module) {
+	if (write && bdev->internal.claim_module) {
 		SPDK_ERRLOG("Could not open %s - already claimed\n", bdev->name);
 		free(desc);
-		pthread_mutex_unlock(&bdev->mutex);
+		pthread_mutex_unlock(&bdev->internal.mutex);
 		return -EPERM;
 	}
 
-	TAILQ_INSERT_TAIL(&bdev->open_descs, desc, link);
+	TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);
 
 	desc->bdev = bdev;
 	desc->remove_cb = remove_cb;
@@ -2971,7 +2971,7 @@ spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_
 	desc->write = write;
 	*_desc = desc;
 
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	return 0;
 }
@@ -2985,13 +2985,13 @@ spdk_bdev_close(struct spdk_bdev_desc *desc)
 	SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Closing descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
 		      spdk_get_thread());
 
-	pthread_mutex_lock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
 
-	TAILQ_REMOVE(&bdev->open_descs, desc, link);
+	TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
 	free(desc);
 
 	/* If no more descriptors, kill QoS channel */
-	if (bdev->qos && TAILQ_EMPTY(&bdev->open_descs)) {
+	if (bdev->internal.qos && TAILQ_EMPTY(&bdev->internal.open_descs)) {
 		SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Closed last descriptor for bdev %s on thread %p. Stopping QoS.\n",
 			      bdev->name, spdk_get_thread());
@@ -3003,13 +3003,13 @@ spdk_bdev_close(struct spdk_bdev_desc *desc)
 		}
 	}
 
-	if (bdev->status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->open_descs)) {
+	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
 		do_unregister = true;
 	}
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	if (do_unregister == true) {
-		spdk_bdev_unregister(bdev, bdev->unregister_cb, bdev->unregister_ctx);
+		spdk_bdev_unregister(bdev, bdev->internal.unregister_cb, bdev->internal.unregister_ctx);
 	}
 }
@@ -3017,9 +3017,9 @@ int
 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
 			    struct spdk_bdev_module *module)
 {
-	if (bdev->claim_module != NULL) {
+	if (bdev->internal.claim_module != NULL) {
 		SPDK_ERRLOG("bdev %s already claimed by module %s\n", bdev->name,
-			    bdev->claim_module->name);
+			    bdev->internal.claim_module->name);
 		return -EPERM;
 	}
@@ -3027,15 +3027,15 @@ spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
 		desc->write = true;
 	}
 
-	bdev->claim_module = module;
+	bdev->internal.claim_module = module;
 	return 0;
 }
 
 void
 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
 {
-	assert(bdev->claim_module != NULL);
-	bdev->claim_module = NULL;
+	assert(bdev->internal.claim_module != NULL);
+	bdev->internal.claim_module = NULL;
 }
 
 struct spdk_bdev *
@@ -3155,9 +3155,9 @@ struct set_qos_limit_ctx {
 static void
 _spdk_bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
 {
-	pthread_mutex_lock(&ctx->bdev->mutex);
-	ctx->bdev->qos_mod_in_progress = false;
-	pthread_mutex_unlock(&ctx->bdev->mutex);
+	pthread_mutex_lock(&ctx->bdev->internal.mutex);
+	ctx->bdev->internal.qos_mod_in_progress = false;
+	pthread_mutex_unlock(&ctx->bdev->internal.mutex);
 
 	ctx->cb_fn(ctx->cb_arg, status);
 	free(ctx);
@@ -3170,10 +3170,10 @@ _spdk_bdev_disable_qos_done(void *cb_arg)
 	struct spdk_bdev *bdev = ctx->bdev;
 	struct spdk_bdev_qos *qos;
 
-	pthread_mutex_lock(&bdev->mutex);
-	qos = bdev->qos;
-	bdev->qos = NULL;
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
+	qos = bdev->internal.qos;
+	bdev->internal.qos = NULL;
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	_spdk_bdev_abort_queued_io(&qos->queued, qos->ch);
 	spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
@@ -3192,9 +3192,9 @@ _spdk_bdev_disable_qos_msg_done(struct spdk_io_channel_iter *i, int status)
 	struct set_qos_limit_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
 	struct spdk_thread *thread;
 
-	pthread_mutex_lock(&bdev->mutex);
-	thread = bdev->qos->thread;
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
+	thread = bdev->internal.qos->thread;
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	spdk_thread_send_msg(thread, _spdk_bdev_disable_qos_done, ctx);
 }
@@ -3216,9 +3216,9 @@ _spdk_bdev_update_qos_limit_iops_msg(void *cb_arg)
 	struct set_qos_limit_ctx *ctx = cb_arg;
 	struct spdk_bdev *bdev = ctx->bdev;
 
-	pthread_mutex_lock(&bdev->mutex);
-	spdk_bdev_qos_update_max_quota_per_timeslice(bdev->qos);
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
+	spdk_bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	_spdk_bdev_set_qos_limit_done(ctx, 0);
 }
@@ -3232,9 +3232,9 @@ _spdk_bdev_enable_qos_msg(struct spdk_io_channel_iter *i)
 	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(ch);
 	int rc;
 
-	pthread_mutex_lock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
 	rc = _spdk_bdev_enable_qos(bdev, bdev_ch);
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	spdk_for_each_channel_continue(i, rc);
 }
@@ -3269,50 +3269,50 @@ spdk_bdev_set_qos_limit_iops(struct spdk_bdev *bdev, uint64_t ios_per_sec,
 	ctx->cb_arg = cb_arg;
 	ctx->bdev = bdev;
 
-	pthread_mutex_lock(&bdev->mutex);
-	if (bdev->qos_mod_in_progress) {
-		pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_lock(&bdev->internal.mutex);
+	if (bdev->internal.qos_mod_in_progress) {
+		pthread_mutex_unlock(&bdev->internal.mutex);
 		free(ctx);
 		cb_fn(cb_arg, -EAGAIN);
 		return;
 	}
-	bdev->qos_mod_in_progress = true;
+	bdev->internal.qos_mod_in_progress = true;
 
 	if (ios_per_sec > 0) {
-		if (bdev->qos == NULL) {
+		if (bdev->internal.qos == NULL) {
 			/* Enabling */
-			bdev->qos = calloc(1, sizeof(*bdev->qos));
-			if (!bdev->qos) {
-				pthread_mutex_unlock(&bdev->mutex);
+			bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+			if (!bdev->internal.qos) {
+				pthread_mutex_unlock(&bdev->internal.mutex);
 				SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
 				free(ctx);
 				cb_fn(cb_arg, -ENOMEM);
 				return;
 			}
 
-			bdev->qos->iops_rate_limit = ios_per_sec;
+			bdev->internal.qos->iops_rate_limit = ios_per_sec;
 			spdk_for_each_channel(__bdev_to_io_dev(bdev),
 					      _spdk_bdev_enable_qos_msg, ctx,
 					      _spdk_bdev_enable_qos_done);
 		} else {
 			/* Updating */
-			bdev->qos->iops_rate_limit = ios_per_sec;
-			spdk_thread_send_msg(bdev->qos->thread, _spdk_bdev_update_qos_limit_iops_msg, ctx);
+			bdev->internal.qos->iops_rate_limit = ios_per_sec;
+			spdk_thread_send_msg(bdev->internal.qos->thread, _spdk_bdev_update_qos_limit_iops_msg, ctx);
		}
 	} else {
-		if (bdev->qos != NULL) {
+		if (bdev->internal.qos != NULL) {
 			/* Disabling */
 			spdk_for_each_channel(__bdev_to_io_dev(bdev),
 					      _spdk_bdev_disable_qos_msg, ctx,
 					      _spdk_bdev_disable_qos_msg_done);
 		} else {
-			pthread_mutex_unlock(&bdev->mutex);
+			pthread_mutex_unlock(&bdev->internal.mutex);
 			_spdk_bdev_set_qos_limit_done(ctx, 0);
 			return;
 		}
 	}
 
-	pthread_mutex_unlock(&bdev->mutex);
+	pthread_mutex_unlock(&bdev->internal.mutex);
 }
 
 SPDK_LOG_REGISTER_COMPONENT("bdev", SPDK_LOG_BDEV)

File 3 of 5

@@ -228,7 +228,7 @@ spdk_rpc_dump_bdev_info(struct spdk_json_write_ctx *w,
 	spdk_json_write_uint64(w, spdk_bdev_get_qos_ios_per_sec(bdev));
 
 	spdk_json_write_name(w, "claimed");
-	spdk_json_write_bool(w, (bdev->claim_module != NULL));
+	spdk_json_write_bool(w, (bdev->internal.claim_module != NULL));
 
 	spdk_json_write_name(w, "supported_io_types");
 	spdk_json_write_object_begin(w);

File 4 of 5

@@ -215,8 +215,8 @@ is_base_bdev(struct spdk_bdev *base, struct spdk_bdev *vbdev)
 	size_t i;
 	int found = 0;
 
-	for (i = 0; i < vbdev->base_bdevs_cnt; i++) {
-		found += vbdev->base_bdevs[i] == base;
+	for (i = 0; i < vbdev->internal.base_bdevs_cnt; i++) {
+		found += vbdev->internal.base_bdevs[i] == base;
 	}
 
 	CU_ASSERT(found <= 1);
@@ -250,7 +250,7 @@ allocate_bdev(char *name)
 	rc = spdk_bdev_register(bdev);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(bdev->base_bdevs_cnt == 0);
+	CU_ASSERT(bdev->internal.base_bdevs_cnt == 0);
 	CU_ASSERT(bdev->vbdevs_cnt == 0);
 
 	return bdev;
@@ -278,7 +278,7 @@ allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
 	rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(bdev->base_bdevs_cnt > 0);
+	CU_ASSERT(bdev->internal.base_bdevs_cnt > 0);
 	CU_ASSERT(bdev->vbdevs_cnt == 0);
 
 	CU_ASSERT(check_base_and_vbdev(base1, bdev) == true);
@@ -301,7 +301,7 @@ free_bdev(struct spdk_bdev *bdev)
 static void
 free_vbdev(struct spdk_bdev *bdev)
 {
-	CU_ASSERT(bdev->base_bdevs_cnt != 0);
+	CU_ASSERT(bdev->internal.base_bdevs_cnt != 0);
 	spdk_bdev_unregister(bdev, NULL, NULL);
 	memset(bdev, 0xFF, sizeof(*bdev));
 	free(bdev);

File 5 of 5

@@ -450,7 +450,7 @@ aborted_reset(void)
 	CU_ASSERT(io_ch[0] != NULL);
 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
 	poll_threads();
-	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);
+	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
 
 	/*
 	 * First reset has been submitted on ch0. Now submit a second
@@ -462,32 +462,32 @@ aborted_reset(void)
 	CU_ASSERT(io_ch[1] != NULL);
 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
 	poll_threads();
-	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);
+	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
 
 	/*
 	 * Now destroy ch1. This will abort the queued reset. Check that
 	 * the second reset was completed with failed status. Also check
-	 * that bdev->reset_in_progress != NULL, since the original reset
-	 * has not been completed yet. This ensures that the bdev code is
-	 * correctly noticing that the failed reset is *not* the one that
-	 * had been submitted to the bdev module.
+	 * that bdev->internal.reset_in_progress != NULL, since the
+	 * original reset has not been completed yet. This ensures that
+	 * the bdev code is correctly noticing that the failed reset is
+	 * *not* the one that had been submitted to the bdev module.
 	 */
 	set_thread(1);
 	spdk_put_io_channel(io_ch[1]);
 	poll_threads();
 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
-	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);
+	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
 
 	/*
 	 * Now complete the first reset, verify that it completed with SUCCESS
-	 * status and that bdev->reset_in_progress is also set back to NULL.
+	 * status and that bdev->internal.reset_in_progress is also set back to NULL.
 	 */
 	set_thread(0);
 	spdk_put_io_channel(io_ch[0]);
 	stub_complete_io(g_bdev.io_target, 0);
 	poll_threads();
 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
-	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);
+	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
 
 	teardown_test();
 }
@@ -618,15 +618,15 @@ basic_qos(void)
 	/* Enable QoS */
 	bdev = &g_bdev.bdev;
-	bdev->qos = calloc(1, sizeof(*bdev->qos));
-	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
-	TAILQ_INIT(&bdev->qos->queued);
+	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+	TAILQ_INIT(&bdev->internal.qos->queued);
 	/*
 	 * Enable both IOPS and bandwidth rate limits.
 	 * In this case, both rate limits will take equal effect.
 	 */
-	bdev->qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
-	bdev->qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
+	bdev->internal.qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
+	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
 
 	g_get_io_channel = true;
@@ -682,7 +682,7 @@ basic_qos(void)
 	/* Close the descriptor, which should stop the qos channel */
 	spdk_bdev_close(g_desc);
 	poll_threads();
-	CU_ASSERT(bdev->qos->ch == NULL);
+	CU_ASSERT(bdev->internal.qos->ch == NULL);
 
 	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
@@ -698,7 +698,7 @@ basic_qos(void)
 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
 
 	/* Confirm that the qos thread is now thread 1 */
-	CU_ASSERT(bdev->qos->ch == bdev_ch[1]);
+	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
 
 	/* Tear down the channels */
 	set_thread(0);
@@ -726,15 +726,15 @@ io_during_qos_queue(void)
 	/* Enable QoS */
 	bdev = &g_bdev.bdev;
-	bdev->qos = calloc(1, sizeof(*bdev->qos));
-	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
-	TAILQ_INIT(&bdev->qos->queued);
+	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+	TAILQ_INIT(&bdev->internal.qos->queued);
 	/*
 	 * Enable both IOPS and bandwidth rate limits.
 	 * In this case, IOPS rate limit will take effect first.
 	 */
-	bdev->qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
-	bdev->qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
+	bdev->internal.qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
+	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
 
 	g_get_io_channel = true;
@@ -814,15 +814,15 @@ io_during_qos_reset(void)
 	/* Enable QoS */
 	bdev = &g_bdev.bdev;
-	bdev->qos = calloc(1, sizeof(*bdev->qos));
-	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
-	TAILQ_INIT(&bdev->qos->queued);
+	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+	TAILQ_INIT(&bdev->internal.qos->queued);
 	/*
 	 * Enable both IOPS and bandwidth rate limits.
 	 * In this case, bandwidth rate limit will take effect first.
 	 */
-	bdev->qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
-	bdev->qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */
+	bdev->internal.qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
+	bdev->internal.qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */
 
 	g_get_io_channel = true;