Compare commits
98 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
31b5b6e1e6 | ||
|
f6dfa72703 | ||
|
50a116dbe5 | ||
|
82f691be43 | ||
|
efabcb5b61 | ||
|
52fa503769 | ||
|
d1a833ffb1 | ||
|
f944a090e3 | ||
|
84b5ac7dac | ||
|
3c22b8e41b | ||
|
a15831658d | ||
|
880cbc5da3 | ||
|
91096c9e4f | ||
|
9811e15503 | ||
|
a82ed6455a | ||
|
b6765e8efa | ||
|
4b58c6a3db | ||
|
a994009f30 | ||
|
902bef378c | ||
|
ad96f83787 | ||
|
9a92b9e3ed | ||
|
366297805e | ||
|
59401ab51b | ||
|
a6c675733b | ||
|
be83292657 | ||
|
b85a64c878 | ||
|
31f18569bd | ||
|
1fa9d7e14e | ||
|
d8a948ca1d | ||
|
3768d9a8c6 | ||
|
a7411e316a | ||
|
e1e269463d | ||
|
7a606da6df | ||
|
3e5cfa4124 | ||
|
624769671d | ||
|
ce3ffa45ed | ||
|
2c4928649a | ||
|
398e1b15ac | ||
|
8e0360cffa | ||
|
c6b5afcec9 | ||
|
c126086c00 | ||
|
38a1ebb516 | ||
|
1c78126235 | ||
|
11b1e6bf4c | ||
|
a2711aa354 | ||
|
38031e742c | ||
|
6450c84d9a | ||
|
f3ff9870d8 | ||
|
5f930d1738 | ||
|
58b41ea2b5 | ||
|
301d6e7c78 | ||
|
3f6c9a4e0f | ||
|
0b4df54fb2 | ||
|
5242a14b30 | ||
|
10698ea595 | ||
|
9c52d301db | ||
|
1256453f92 | ||
|
d77f8f48e6 | ||
|
90f2eb1863 | ||
|
d5e3f80d0f | ||
|
f282384991 | ||
|
e0145cc2a7 | ||
|
f176690ec7 | ||
|
2577a292ef | ||
|
6381e4749d | ||
|
611e2f7823 | ||
|
5f697b3148 | ||
|
01088b6906 | ||
|
abb0ccd40a | ||
|
4bf1f64f56 | ||
|
94b8126d8a | ||
|
93be2ada98 | ||
|
754c217d09 | ||
|
c7658dea37 | ||
|
a39873a5e5 | ||
|
a715b454e9 | ||
|
12466d1ae3 | ||
|
d5aeeaf2f7 | ||
|
18a977f289 | ||
|
f8c1f991f0 | ||
|
9cbb4af809 | ||
|
c2d4486dba | ||
|
30c4390ea0 | ||
|
de271dbe13 | ||
|
f6823c5907 | ||
|
e30e5d7c79 | ||
|
d353ce1982 | ||
|
850a5b642a | ||
|
ca3a5cf8de | ||
|
820d0320dd | ||
|
adb85a252d | ||
|
6df4d14eec | ||
|
ade2511df2 | ||
|
02b29614e7 | ||
|
a9085b08c3 | ||
|
cf0daf7f3b | ||
|
5ba7fb92c6 | ||
|
3706702904 |
1
.gitignore
vendored
1
.gitignore
vendored
@ -27,4 +27,5 @@ CONFIG.local
|
||||
.settings
|
||||
mk/cc.mk
|
||||
mk/config.mk
|
||||
mk/cc.flags.mk
|
||||
PYTHON_COMMAND
|
||||
|
17
CHANGELOG.md
17
CHANGELOG.md
@ -1,5 +1,22 @@
|
||||
# Changelog
|
||||
|
||||
## v19.04.2: (Upcoming Release)
|
||||
|
||||
## v19.04.1:
|
||||
|
||||
### NVMe-oF Target
|
||||
|
||||
Increased default maximum number of queue pairs to 128 in order to match
|
||||
Linux kernel target. Users can still decrease this default when
|
||||
creating the transport (i.e. -p option for nvmf_create_transport in rpc.py).
|
||||
|
||||
Shared receive queue can now be disabled even for NICs that support it using the
|
||||
`nvmf_create_transport` RPC method parameter `no_srq`. The actual use of a shared
|
||||
receive queue is predicated on hardware support when this flag is not used.
|
||||
|
||||
### DPDK
|
||||
Added DPDK 19.05 support
|
||||
|
||||
## v19.04:
|
||||
|
||||
### nvme
|
||||
|
6
Makefile
6
Makefile
@ -63,10 +63,12 @@ endif
|
||||
|
||||
ifeq ($(CONFIG_IPSEC_MB),y)
|
||||
LIB += ipsecbuild
|
||||
DPDK_DEPS += ipsecbuild
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_ISAL),y)
|
||||
LIB += isalbuild
|
||||
DPDK_DEPS += isalbuild
|
||||
endif
|
||||
|
||||
all: $(DIRS-y)
|
||||
@ -78,9 +80,7 @@ install: all
|
||||
$(Q)echo "Installed to $(DESTDIR)$(CONFIG_PREFIX)"
|
||||
|
||||
ifneq ($(SKIP_DPDK_BUILD),1)
|
||||
ifeq ($(CONFIG_ISAL),y)
|
||||
dpdkbuild: isalbuild
|
||||
endif
|
||||
dpdkbuild: $(DPDK_DEPS)
|
||||
endif
|
||||
|
||||
shared_lib: lib
|
||||
|
@ -10,8 +10,7 @@ out=$PWD
|
||||
MAKEFLAGS=${MAKEFLAGS:--j16}
|
||||
cd $rootdir
|
||||
|
||||
timing_enter autopackage
|
||||
|
||||
timing_enter porcelain_check
|
||||
$MAKE clean
|
||||
|
||||
if [ `git status --porcelain --ignore-submodules | wc -l` -ne 0 ]; then
|
||||
@ -19,6 +18,14 @@ if [ `git status --porcelain --ignore-submodules | wc -l` -ne 0 ]; then
|
||||
git status --porcelain --ignore-submodules
|
||||
exit 1
|
||||
fi
|
||||
timing_exit porcelain_check
|
||||
|
||||
if [ $RUN_NIGHTLY -eq 0 ]; then
|
||||
timing_finish
|
||||
exit 0
|
||||
fi
|
||||
|
||||
timing_enter autopackage
|
||||
|
||||
spdk_pv=spdk-$(date +%Y_%m_%d)
|
||||
spdk_tarball=${spdk_pv}.tar
|
||||
|
@ -779,14 +779,14 @@ Example response:
|
||||
|
||||
Construct new OCF bdev.
|
||||
Command accepts cache mode that is going to be used.
|
||||
Currently, we support Write-Through and Pass-Through OCF cache modes.
|
||||
Currently, we support Write-Through, Pass-Through and Write-Back OCF cache modes.
|
||||
|
||||
### Parameters
|
||||
|
||||
Name | Optional | Type | Description
|
||||
----------------------- | -------- | ----------- | -----------
|
||||
name | Required | string | Bdev name to use
|
||||
mode | Required | string | OCF cache mode ('wt' or 'pt')
|
||||
mode | Required | string | OCF cache mode ('wb' or 'wt' or 'pt')
|
||||
cache_bdev_name | Required | string | Name of underlying cache bdev
|
||||
core_bdev_name | Required | string | Name of underlying core bdev
|
||||
|
||||
@ -3487,6 +3487,8 @@ io_unit_size | Optional | number | I/O unit size (bytes)
|
||||
max_aq_depth | Optional | number | Max number of admin cmds per AQ
|
||||
num_shared_buffers | Optional | number | The number of pooled data buffers available to the transport
|
||||
buf_cache_size | Optional | number | The number of shared buffers to reserve for each poll group
|
||||
max_srq_depth | Optional | number | The number of elements in a per-thread shared receive queue (RDMA only)
|
||||
no_srq | Optional | boolean | Disable shared receive queue even for devices that support it. (RDMA only)
|
||||
|
||||
### Example:
|
||||
|
||||
|
@ -46,6 +46,7 @@ ifeq ($(CONFIG_CRYPTO),y)
|
||||
CRYPTO_ENABLED = y
|
||||
DPDK_OPTS += CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y
|
||||
DPDK_CFLAGS += -I$(IPSEC_MB_DIR)
|
||||
DPDK_LDFLAGS += -L$(IPSEC_MB_DIR)
|
||||
DPDK_OPTS += CONFIG_RTE_LIBRTE_REORDER=y
|
||||
DPDK_OPTS += CONFIG_RTE_LIBRTE_PMD_QAT=y
|
||||
DPDK_OPTS += CONFIG_RTE_LIBRTE_PMD_QAT_SYM=y
|
||||
@ -67,6 +68,7 @@ endif
|
||||
ifeq ($(CONFIG_REDUCE),y)
|
||||
DPDK_OPTS += CONFIG_RTE_LIBRTE_PMD_ISAL=y
|
||||
DPDK_CFLAGS += -I$(ISAL_DIR)
|
||||
DPDK_LDFLAGS += -L$(ISAL_DIR)/.libs
|
||||
else
|
||||
DPDK_OPTS += CONFIG_RTE_LIBRTE_PMD_ISAL=n
|
||||
endif
|
||||
@ -117,7 +119,7 @@ $(SPDK_ROOT_DIR)/dpdk/build:
|
||||
$(Q)$(MAKE) -C $(SPDK_ROOT_DIR)/dpdk config T=$(DPDK_CONFIG) $(DPDK_OPTS)
|
||||
|
||||
all: $(SPDK_ROOT_DIR)/dpdk/build
|
||||
$(Q)$(MAKE) -C $(SPDK_ROOT_DIR)/dpdk/build EXTRA_CFLAGS="$(DPDK_CFLAGS)" MAKEFLAGS="T=$(DPDK_CONFIG) -j$(NPROC)" $(DPDK_OPTS)
|
||||
$(Q)$(MAKE) -C $(SPDK_ROOT_DIR)/dpdk/build EXTRA_CFLAGS="$(DPDK_CFLAGS)" EXTRA_LDFLAGS="$(DPDK_LDFLAGS)" MAKEFLAGS="T=$(DPDK_CONFIG) -j$(NPROC)" $(DPDK_OPTS)
|
||||
|
||||
clean:
|
||||
$(Q)rm -rf $(SPDK_ROOT_DIR)/dpdk/build
|
||||
|
@ -49,6 +49,12 @@
|
||||
#include "fio.h"
|
||||
#include "optgroup.h"
|
||||
|
||||
/* FreeBSD is missing CLOCK_MONOTONIC_RAW,
|
||||
* so alternative is provided. */
|
||||
#ifndef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
|
||||
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
|
||||
#endif
|
||||
|
||||
struct spdk_fio_options {
|
||||
void *pad;
|
||||
char *conf;
|
||||
|
@ -45,6 +45,12 @@
|
||||
#include "fio.h"
|
||||
#include "optgroup.h"
|
||||
|
||||
/* FreeBSD is missing CLOCK_MONOTONIC_RAW,
|
||||
* so alternative is provided. */
|
||||
#ifndef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
|
||||
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
|
||||
#endif
|
||||
|
||||
#define NVME_IO_ALIGN 4096
|
||||
|
||||
static bool g_spdk_env_initialized;
|
||||
|
@ -132,6 +132,19 @@ struct spdk_app_opts {
|
||||
|
||||
/** Opaque context for use of the env implementation. */
|
||||
void *env_context;
|
||||
|
||||
/**
|
||||
* for passing user-provided log call
|
||||
*
|
||||
* \param level Log level threshold.
|
||||
* \param file Name of the current source file.
|
||||
* \param line Current source file line.
|
||||
* \param func Current source function name.
|
||||
* \param format Format string to the message.
|
||||
*/
|
||||
void (* log)(int level, const char *file, const int line,
|
||||
const char *func, const char *format);
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -45,11 +45,14 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef void logfunc(int level, const char *file, const int line,
|
||||
const char *func, const char *format);
|
||||
|
||||
/**
|
||||
* Initialize the logging module. Messages prior
|
||||
* to this call will be dropped.
|
||||
*/
|
||||
void spdk_log_open(void);
|
||||
void spdk_log_open(logfunc *logf);
|
||||
|
||||
/**
|
||||
* Close the currently active log. Messages after this call
|
||||
|
@ -64,15 +64,16 @@ struct spdk_json_write_ctx;
|
||||
struct spdk_nvmf_transport;
|
||||
|
||||
struct spdk_nvmf_transport_opts {
|
||||
uint16_t max_queue_depth;
|
||||
uint16_t max_qpairs_per_ctrlr;
|
||||
uint32_t in_capsule_data_size;
|
||||
uint32_t max_io_size;
|
||||
uint32_t io_unit_size;
|
||||
uint32_t max_aq_depth;
|
||||
uint32_t num_shared_buffers;
|
||||
uint32_t buf_cache_size;
|
||||
uint32_t max_srq_depth;
|
||||
uint16_t max_queue_depth;
|
||||
uint16_t max_qpairs_per_ctrlr;
|
||||
uint32_t in_capsule_data_size;
|
||||
uint32_t max_io_size;
|
||||
uint32_t io_unit_size;
|
||||
uint32_t max_aq_depth;
|
||||
uint32_t num_shared_buffers;
|
||||
uint32_t buf_cache_size;
|
||||
uint32_t max_srq_depth;
|
||||
bool no_srq;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -54,12 +54,12 @@
|
||||
* Patch level is incremented on maintenance branch releases and reset to 0 for each
|
||||
* new major.minor release.
|
||||
*/
|
||||
#define SPDK_VERSION_PATCH 0
|
||||
#define SPDK_VERSION_PATCH 2
|
||||
|
||||
/**
|
||||
* Version string suffix.
|
||||
*/
|
||||
#define SPDK_VERSION_SUFFIX ""
|
||||
#define SPDK_VERSION_SUFFIX "-pre"
|
||||
|
||||
/**
|
||||
* Single numeric value representing a version number for compile-time comparisons.
|
||||
|
@ -2903,6 +2903,11 @@ spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channe
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!_spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) &&
|
||||
!_spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE)) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
bdev_io = spdk_bdev_get_io(channel);
|
||||
|
||||
if (!bdev_io) {
|
||||
@ -2919,16 +2924,15 @@ spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channe
|
||||
if (_spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
|
||||
spdk_bdev_io_submit(bdev_io);
|
||||
return 0;
|
||||
} else if (_spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE)) {
|
||||
assert(spdk_bdev_get_block_size(bdev) <= ZERO_BUFFER_SIZE);
|
||||
bdev_io->u.bdev.split_remaining_num_blocks = num_blocks;
|
||||
bdev_io->u.bdev.split_current_offset_blocks = offset_blocks;
|
||||
_spdk_bdev_write_zero_buffer_next(bdev_io);
|
||||
return 0;
|
||||
} else {
|
||||
spdk_bdev_free_io(bdev_io);
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
assert(_spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE));
|
||||
assert(spdk_bdev_get_block_size(bdev) <= ZERO_BUFFER_SIZE);
|
||||
bdev_io->u.bdev.split_remaining_num_blocks = num_blocks;
|
||||
bdev_io->u.bdev.split_current_offset_blocks = offset_blocks;
|
||||
_spdk_bdev_write_zero_buffer_next(bdev_io);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -693,6 +693,7 @@ _reduce_destroy_cb(void *ctx, int reduce_errno)
|
||||
}
|
||||
|
||||
comp_bdev->vol = NULL;
|
||||
spdk_put_io_channel(comp_bdev->base_ch);
|
||||
spdk_bdev_unregister(&comp_bdev->comp_bdev, comp_bdev->delete_cb_fn,
|
||||
comp_bdev->delete_cb_arg);
|
||||
}
|
||||
@ -703,11 +704,12 @@ delete_vol_unload_cb(void *cb_arg, int reduce_errno)
|
||||
{
|
||||
struct vbdev_compress *comp_bdev = (struct vbdev_compress *)cb_arg;
|
||||
|
||||
/* Close the underlying bdev. */
|
||||
spdk_bdev_close(comp_bdev->base_desc);
|
||||
if (reduce_errno) {
|
||||
SPDK_ERRLOG("number %d\n", reduce_errno);
|
||||
} else {
|
||||
/* reducelib needs a channel to comm with the backing device */
|
||||
comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc);
|
||||
|
||||
/* Clean the device before we free our resources. */
|
||||
spdk_reduce_vol_destroy(&comp_bdev->backing_dev, _reduce_destroy_cb, comp_bdev);
|
||||
}
|
||||
@ -1300,6 +1302,7 @@ vbdev_reduce_load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno
|
||||
/* Done with metadata operations */
|
||||
spdk_put_io_channel(meta_ctx->base_ch);
|
||||
spdk_bdev_close(meta_ctx->base_desc);
|
||||
meta_ctx->base_desc = NULL;
|
||||
|
||||
if (reduce_errno != 0) {
|
||||
/* This error means it is not a compress disk. */
|
||||
|
@ -579,12 +579,14 @@ void
|
||||
vbdev_lvol_destroy(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
|
||||
{
|
||||
struct vbdev_lvol_destroy_ctx *ctx;
|
||||
size_t count;
|
||||
|
||||
assert(lvol != NULL);
|
||||
assert(cb_fn != NULL);
|
||||
|
||||
/* Check if it is possible to delete lvol */
|
||||
if (spdk_lvol_deletable(lvol) == false) {
|
||||
spdk_blob_get_clones(lvol->lvol_store->blobstore, lvol->blob_id, NULL, &count);
|
||||
if (count > 1) {
|
||||
/* throw an error */
|
||||
SPDK_ERRLOG("Cannot delete lvol\n");
|
||||
cb_fn(cb_arg, -EPERM);
|
||||
|
@ -225,11 +225,11 @@ vbdev_ocf_ctx_data_seek(ctx_data_t *dst, ctx_data_seek_t seek, uint32_t offset)
|
||||
|
||||
switch (seek) {
|
||||
case ctx_data_seek_begin:
|
||||
off = MIN(off, d->size);
|
||||
off = MIN(offset, d->size);
|
||||
d->seek = off;
|
||||
break;
|
||||
case ctx_data_seek_current:
|
||||
off = MIN(off, d->size - d->seek);
|
||||
off = MIN(offset, d->size - d->seek);
|
||||
d->seek += off;
|
||||
break;
|
||||
}
|
||||
@ -288,31 +288,185 @@ vbdev_ocf_ctx_data_secure_erase(ctx_data_t *ctx_data)
|
||||
}
|
||||
}
|
||||
|
||||
int vbdev_ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue, const struct ocf_queue_ops *ops)
|
||||
{
|
||||
int rc;
|
||||
struct vbdev_ocf_cache_ctx *ctx = ocf_cache_get_priv(cache);
|
||||
|
||||
pthread_mutex_lock(&ctx->lock);
|
||||
rc = ocf_queue_create(cache, queue, ops);
|
||||
pthread_mutex_unlock(&ctx->lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void vbdev_ocf_queue_put(ocf_queue_t queue)
|
||||
{
|
||||
ocf_cache_t cache = ocf_queue_get_cache(queue);
|
||||
struct vbdev_ocf_cache_ctx *ctx = ocf_cache_get_priv(cache);
|
||||
|
||||
pthread_mutex_lock(&ctx->lock);
|
||||
ocf_queue_put(queue);
|
||||
pthread_mutex_unlock(&ctx->lock);
|
||||
}
|
||||
|
||||
void vbdev_ocf_cache_ctx_put(struct vbdev_ocf_cache_ctx *ctx)
|
||||
{
|
||||
if (env_atomic_dec_return(&ctx->refcnt) == 0) {
|
||||
pthread_mutex_destroy(&ctx->lock);
|
||||
free(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
void vbdev_ocf_cache_ctx_get(struct vbdev_ocf_cache_ctx *ctx)
|
||||
{
|
||||
env_atomic_inc(&ctx->refcnt);
|
||||
}
|
||||
|
||||
struct cleaner_priv {
|
||||
struct spdk_poller *poller;
|
||||
ocf_queue_t queue;
|
||||
uint64_t next_run;
|
||||
};
|
||||
|
||||
static int
|
||||
cleaner_poll(void *arg)
|
||||
{
|
||||
ocf_cleaner_t cleaner = arg;
|
||||
struct cleaner_priv *priv = ocf_cleaner_get_priv(cleaner);
|
||||
uint32_t iono = ocf_queue_pending_io(priv->queue);
|
||||
int i, max = spdk_min(32, iono);
|
||||
|
||||
for (i = 0; i < max; i++) {
|
||||
ocf_queue_run_single(priv->queue);
|
||||
}
|
||||
|
||||
if (spdk_get_ticks() >= priv->next_run) {
|
||||
ocf_cleaner_run(cleaner, priv->queue);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (iono > 0) {
|
||||
return 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
cleaner_cmpl(ocf_cleaner_t c, uint32_t interval)
|
||||
{
|
||||
struct cleaner_priv *priv = ocf_cleaner_get_priv(c);
|
||||
|
||||
priv->next_run = spdk_get_ticks() + ((interval * spdk_get_ticks_hz()) / 1000);
|
||||
}
|
||||
|
||||
static void
|
||||
cleaner_queue_kick(ocf_queue_t q)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
cleaner_queue_stop(ocf_queue_t q)
|
||||
{
|
||||
struct cleaner_priv *cpriv = ocf_queue_get_priv(q);
|
||||
|
||||
if (cpriv) {
|
||||
spdk_poller_unregister(&cpriv->poller);
|
||||
free(cpriv);
|
||||
}
|
||||
}
|
||||
|
||||
const struct ocf_queue_ops cleaner_queue_ops = {
|
||||
.kick_sync = cleaner_queue_kick,
|
||||
.kick = cleaner_queue_kick,
|
||||
.stop = cleaner_queue_stop,
|
||||
};
|
||||
|
||||
static int
|
||||
vbdev_ocf_ctx_cleaner_init(ocf_cleaner_t c)
|
||||
{
|
||||
/* TODO [writeback]: implement with writeback mode support */
|
||||
int rc;
|
||||
struct cleaner_priv *priv = calloc(1, sizeof(*priv));
|
||||
ocf_cache_t cache = ocf_cleaner_get_cache(c);
|
||||
struct vbdev_ocf_cache_ctx *cctx = ocf_cache_get_priv(cache);
|
||||
|
||||
if (priv == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rc = vbdev_ocf_queue_create(cache, &priv->queue, &cleaner_queue_ops);
|
||||
if (rc) {
|
||||
free(priv);
|
||||
return rc;
|
||||
}
|
||||
|
||||
ocf_queue_set_priv(priv->queue, priv);
|
||||
|
||||
cctx->cleaner_queue = priv->queue;
|
||||
|
||||
ocf_cleaner_set_cmpl(c, cleaner_cmpl);
|
||||
ocf_cleaner_set_priv(c, priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
vbdev_ocf_ctx_cleaner_stop(ocf_cleaner_t c)
|
||||
{
|
||||
/* TODO [writeback]: implement with writeback mode support */
|
||||
struct cleaner_priv *priv = ocf_cleaner_get_priv(c);
|
||||
|
||||
vbdev_ocf_queue_put(priv->queue);
|
||||
}
|
||||
|
||||
static int vbdev_ocf_volume_updater_init(ocf_metadata_updater_t mu)
|
||||
static void
|
||||
vbdev_ocf_ctx_cleaner_kick(ocf_cleaner_t cleaner)
|
||||
{
|
||||
/* TODO [metadata]: implement with persistent metadata support */
|
||||
struct cleaner_priv *priv = ocf_cleaner_get_priv(cleaner);
|
||||
|
||||
if (priv->poller) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* We start cleaner poller at the same thread where cache was created
|
||||
* TODO: allow user to specify core at which cleaner should run */
|
||||
priv->poller = spdk_poller_register(cleaner_poll, cleaner, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
vbdev_ocf_md_kick(void *ctx)
|
||||
{
|
||||
ocf_metadata_updater_t mu = ctx;
|
||||
ocf_cache_t cache = ocf_metadata_updater_get_cache(mu);
|
||||
|
||||
if (ocf_cache_is_running(cache)) {
|
||||
ocf_metadata_updater_run(mu);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
vbdev_ocf_volume_updater_init(ocf_metadata_updater_t mu)
|
||||
{
|
||||
struct spdk_thread *md_thread = spdk_get_thread();
|
||||
|
||||
ocf_metadata_updater_set_priv(mu, md_thread);
|
||||
|
||||
return 0;
|
||||
}
|
||||
static void vbdev_ocf_volume_updater_stop(ocf_metadata_updater_t mu)
|
||||
|
||||
static void
|
||||
vbdev_ocf_volume_updater_stop(ocf_metadata_updater_t mu)
|
||||
{
|
||||
/* TODO [metadata]: implement with persistent metadata support */
|
||||
|
||||
}
|
||||
static void vbdev_ocf_volume_updater_kick(ocf_metadata_updater_t mu)
|
||||
|
||||
static void
|
||||
vbdev_ocf_volume_updater_kick(ocf_metadata_updater_t mu)
|
||||
{
|
||||
/* TODO [metadata]: implement with persistent metadata support */
|
||||
struct spdk_thread *md_thread = ocf_metadata_updater_get_priv(mu);
|
||||
|
||||
/* We need to send message to updater thread because
|
||||
* kick can happen from any thread */
|
||||
spdk_thread_send_msg(md_thread, vbdev_ocf_md_kick, mu);
|
||||
}
|
||||
|
||||
/* This function is main way by which OCF communicates with user
|
||||
@ -362,10 +516,11 @@ static const struct ocf_ctx_config vbdev_ocf_ctx_cfg = {
|
||||
.cleaner = {
|
||||
.init = vbdev_ocf_ctx_cleaner_init,
|
||||
.stop = vbdev_ocf_ctx_cleaner_stop,
|
||||
.kick = vbdev_ocf_ctx_cleaner_kick,
|
||||
},
|
||||
|
||||
.logger = {
|
||||
.printf = vbdev_ocf_ctx_log_printf,
|
||||
.print = vbdev_ocf_ctx_log_printf,
|
||||
.dump_stack = NULL,
|
||||
},
|
||||
|
||||
|
@ -35,6 +35,7 @@
|
||||
#define VBDEV_OCF_CTX_H
|
||||
|
||||
#include <ocf/ocf.h>
|
||||
#include "spdk/thread.h"
|
||||
|
||||
extern ocf_ctx_t vbdev_ocf_ctx;
|
||||
|
||||
@ -42,7 +43,24 @@ extern ocf_ctx_t vbdev_ocf_ctx;
|
||||
|
||||
#define SPDK_OBJECT 1
|
||||
|
||||
/* Context of cache instance */
|
||||
struct vbdev_ocf_cache_ctx {
|
||||
ocf_queue_t mngt_queue;
|
||||
ocf_queue_t cleaner_queue;
|
||||
struct spdk_io_channel *management_channel;
|
||||
pthread_mutex_t lock;
|
||||
env_atomic refcnt;
|
||||
};
|
||||
|
||||
void vbdev_ocf_cache_ctx_put(struct vbdev_ocf_cache_ctx *ctx);
|
||||
void vbdev_ocf_cache_ctx_get(struct vbdev_ocf_cache_ctx *ctx);
|
||||
|
||||
int vbdev_ocf_ctx_init(void);
|
||||
void vbdev_ocf_ctx_cleanup(void);
|
||||
|
||||
/* Thread safe queue creation and deletion
|
||||
* These are wrappers for original ocf_queue_create() and ocf_queue_put() */
|
||||
int vbdev_ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue, const struct ocf_queue_ops *ops);
|
||||
void vbdev_ocf_queue_put(ocf_queue_t queue);
|
||||
|
||||
#endif
|
||||
|
11
lib/bdev/ocf/env/ocf_env.h
vendored
11
lib/bdev/ocf/env/ocf_env.h
vendored
@ -145,6 +145,17 @@ static inline void *env_vzalloc(size_t size)
|
||||
SPDK_MALLOC_DMA);
|
||||
}
|
||||
|
||||
static inline void *env_secure_alloc(size_t size)
|
||||
{
|
||||
return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
|
||||
SPDK_MALLOC_DMA);
|
||||
}
|
||||
|
||||
static inline void env_secure_free(const void *ptr, size_t size)
|
||||
{
|
||||
return spdk_free((void *)ptr);
|
||||
}
|
||||
|
||||
static inline void env_vfree(const void *ptr)
|
||||
{
|
||||
return spdk_free((void *)ptr);
|
||||
|
@ -74,7 +74,12 @@ mngt_poll_fn(void *opaque)
|
||||
struct vbdev_ocf *vbdev = opaque;
|
||||
|
||||
if (vbdev->mngt_ctx.poller_fn) {
|
||||
vbdev->mngt_ctx.poller_fn(vbdev);
|
||||
if (vbdev->mngt_ctx.timeout_ts &&
|
||||
spdk_get_ticks() >= vbdev->mngt_ctx.timeout_ts) {
|
||||
vbdev_ocf_mngt_continue(vbdev, -ETIMEDOUT);
|
||||
} else {
|
||||
vbdev->mngt_ctx.poller_fn(vbdev);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -104,11 +109,34 @@ vbdev_ocf_mngt_start(struct vbdev_ocf *vbdev, vbdev_ocf_mngt_fn *path,
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
vbdev_ocf_mngt_poll_set_timeout(struct vbdev_ocf *vbdev, uint64_t millisec)
|
||||
{
|
||||
uint64_t ticks;
|
||||
|
||||
ticks = millisec * spdk_get_ticks_hz() / 1000;
|
||||
vbdev->mngt_ctx.timeout_ts = spdk_get_ticks() + ticks;
|
||||
}
|
||||
|
||||
void
|
||||
vbdev_ocf_mngt_poll(struct vbdev_ocf *vbdev, vbdev_ocf_mngt_fn fn)
|
||||
{
|
||||
assert(vbdev->mngt_ctx.poller != NULL);
|
||||
vbdev->mngt_ctx.poller_fn = fn;
|
||||
vbdev_ocf_mngt_poll_set_timeout(vbdev, 5000);
|
||||
}
|
||||
|
||||
void
|
||||
vbdev_ocf_mngt_stop(struct vbdev_ocf *vbdev)
|
||||
{
|
||||
spdk_poller_unregister(&vbdev->mngt_ctx.poller);
|
||||
|
||||
if (vbdev->mngt_ctx.cb) {
|
||||
vbdev->mngt_ctx.cb(vbdev->mngt_ctx.status, vbdev, vbdev->mngt_ctx.cb_arg);
|
||||
}
|
||||
|
||||
memset(&vbdev->mngt_ctx, 0, sizeof(vbdev->mngt_ctx));
|
||||
}
|
||||
|
||||
void
|
||||
@ -129,9 +157,5 @@ vbdev_ocf_mngt_continue(struct vbdev_ocf *vbdev, int status)
|
||||
return;
|
||||
}
|
||||
|
||||
spdk_poller_unregister(&vbdev->mngt_ctx.poller);
|
||||
if (vbdev->mngt_ctx.cb) {
|
||||
vbdev->mngt_ctx.cb(vbdev->mngt_ctx.status, vbdev->mngt_ctx.cb_arg);
|
||||
}
|
||||
memset(&vbdev->mngt_ctx, 0, sizeof(vbdev->mngt_ctx));
|
||||
vbdev_ocf_mngt_stop(vbdev);
|
||||
}
|
||||
|
@ -49,11 +49,15 @@ int vbdev_ocf_mngt_start(struct vbdev_ocf *vbdev, vbdev_ocf_mngt_fn *path,
|
||||
vbdev_ocf_mngt_callback cb, void *cb_arg);
|
||||
|
||||
/* Continue execution with polling operation (fn)
|
||||
* fn must invoke vbdev_ocf_mngt_continue() to stop polling */
|
||||
* fn must invoke vbdev_ocf_mngt_continue() to stop polling
|
||||
* Poller has default timeout of 5 seconds */
|
||||
void vbdev_ocf_mngt_poll(struct vbdev_ocf *vbdev, vbdev_ocf_mngt_fn fn);
|
||||
|
||||
/* Continue execution with next function that is on path
|
||||
* If next function is NULL, finish management operation and invoke callback */
|
||||
void vbdev_ocf_mngt_continue(struct vbdev_ocf *vbdev, int status);
|
||||
|
||||
/* Stop the execution and invoke callback with last status returned */
|
||||
void vbdev_ocf_mngt_stop(struct vbdev_ocf *vbdev);
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -39,6 +39,8 @@
|
||||
#include "spdk/bdev.h"
|
||||
#include "spdk/bdev_module.h"
|
||||
|
||||
#define VBDEV_OCF_MD_MAX_LEN 4096
|
||||
|
||||
struct vbdev_ocf;
|
||||
|
||||
/* Context for OCF queue poller
|
||||
@ -65,6 +67,8 @@ struct vbdev_ocf_state {
|
||||
bool doing_reset;
|
||||
/* From the moment when exp_bdev is registered */
|
||||
bool started;
|
||||
/* Status of last attempt for stopping this device */
|
||||
int stop_status;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -79,11 +83,15 @@ struct vbdev_ocf_config {
|
||||
|
||||
/* Core initial config */
|
||||
struct ocf_mngt_core_config core;
|
||||
|
||||
/* Load flag, if set to true, then we will try load cache instance from disk,
|
||||
* otherwise we will create new cache on that disk */
|
||||
bool loadq;
|
||||
};
|
||||
|
||||
/* Types for management operations */
|
||||
typedef void (*vbdev_ocf_mngt_fn)(struct vbdev_ocf *);
|
||||
typedef void (*vbdev_ocf_mngt_callback)(int, void *);
|
||||
typedef void (*vbdev_ocf_mngt_callback)(int, struct vbdev_ocf *, void *);
|
||||
|
||||
/* Context for asynchronous management operations
|
||||
* Single management operation usually contains a list of sub procedures,
|
||||
@ -97,6 +105,8 @@ struct vbdev_ocf_mngt_ctx {
|
||||
struct spdk_poller *poller;
|
||||
/* Function that gets invoked by poller on each iteration */
|
||||
vbdev_ocf_mngt_fn poller_fn;
|
||||
/* Poller timeout time stamp - when the poller should stop with error */
|
||||
uint64_t timeout_ts;
|
||||
|
||||
/* Status of management operation */
|
||||
int status;
|
||||
@ -126,6 +136,9 @@ struct vbdev_ocf_base {
|
||||
/* True if SPDK bdev has been claimed and opened for writing */
|
||||
bool attached;
|
||||
|
||||
/* Channel for cleaner operations */
|
||||
struct spdk_io_channel *management_channel;
|
||||
|
||||
/* Reference to main vbdev */
|
||||
struct vbdev_ocf *parent;
|
||||
};
|
||||
@ -152,19 +165,27 @@ struct vbdev_ocf {
|
||||
|
||||
/* Management context */
|
||||
struct vbdev_ocf_mngt_ctx mngt_ctx;
|
||||
/* Cache conext */
|
||||
struct vbdev_ocf_cache_ctx *cache_ctx;
|
||||
|
||||
/* Exposed SPDK bdev. Registered in bdev layer */
|
||||
struct spdk_bdev exp_bdev;
|
||||
|
||||
/* OCF uuid for core device of this vbdev */
|
||||
char uuid[VBDEV_OCF_MD_MAX_LEN];
|
||||
|
||||
/* Link to global list of this type structures */
|
||||
TAILQ_ENTRY(vbdev_ocf) tailq;
|
||||
};
|
||||
|
||||
int vbdev_ocf_construct(
|
||||
void vbdev_ocf_construct(
|
||||
const char *vbdev_name,
|
||||
const char *cache_mode_name,
|
||||
const char *cache_name,
|
||||
const char *core_name);
|
||||
const char *core_name,
|
||||
bool loadq,
|
||||
void (*cb)(int, struct vbdev_ocf *, void *),
|
||||
void *cb_arg);
|
||||
|
||||
/* If vbdev is online, return its object */
|
||||
struct vbdev_ocf *vbdev_ocf_get_by_name(const char *name);
|
||||
|
@ -62,13 +62,31 @@ static const struct spdk_json_object_decoder rpc_construct_ocf_bdev_decoders[] =
|
||||
{"core_bdev_name", offsetof(struct rpc_construct_ocf_bdev, core_bdev_name), spdk_json_decode_string},
|
||||
};
|
||||
|
||||
static void
|
||||
construct_cb(int status, struct vbdev_ocf *vbdev, void *cb_arg)
|
||||
{
|
||||
struct spdk_jsonrpc_request *request = cb_arg;
|
||||
struct spdk_json_write_ctx *w;
|
||||
|
||||
if (status) {
|
||||
spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
|
||||
"Could not create OCF vbdev: %d",
|
||||
status);
|
||||
} else {
|
||||
w = spdk_jsonrpc_begin_result(request);
|
||||
if (w) {
|
||||
spdk_json_write_string(w, vbdev->name);
|
||||
spdk_jsonrpc_end_result(request, w);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
spdk_rpc_construct_ocf_bdev(struct spdk_jsonrpc_request *request,
|
||||
const struct spdk_json_val *params)
|
||||
{
|
||||
int ret = 0;
|
||||
struct rpc_construct_ocf_bdev req = {NULL};
|
||||
struct spdk_json_write_ctx *w;
|
||||
int ret;
|
||||
|
||||
ret = spdk_json_decode_object(params, rpc_construct_ocf_bdev_decoders,
|
||||
SPDK_COUNTOF(rpc_construct_ocf_bdev_decoders),
|
||||
@ -76,24 +94,12 @@ spdk_rpc_construct_ocf_bdev(struct spdk_jsonrpc_request *request,
|
||||
if (ret) {
|
||||
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
|
||||
"Invalid parameters");
|
||||
goto end;
|
||||
free_rpc_construct_ocf_bdev(&req);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = vbdev_ocf_construct(req.name, req.mode, req.cache_bdev_name, req.core_bdev_name);
|
||||
if (ret) {
|
||||
spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
|
||||
"Could not create OCF vbdev: %s",
|
||||
spdk_strerror(-ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
w = spdk_jsonrpc_begin_result(request);
|
||||
if (w) {
|
||||
spdk_json_write_string(w, req.name);
|
||||
spdk_jsonrpc_end_result(request, w);
|
||||
}
|
||||
|
||||
end:
|
||||
vbdev_ocf_construct(req.name, req.mode, req.cache_bdev_name, req.core_bdev_name, false,
|
||||
construct_cb, request);
|
||||
free_rpc_construct_ocf_bdev(&req);
|
||||
}
|
||||
SPDK_RPC_REGISTER("construct_ocf_bdev", spdk_rpc_construct_ocf_bdev, SPDK_RPC_RUNTIME)
|
||||
|
@ -44,14 +44,18 @@
|
||||
#include "vbdev_ocf.h"
|
||||
|
||||
static int
|
||||
vbdev_ocf_volume_open(ocf_volume_t volume)
|
||||
vbdev_ocf_volume_open(ocf_volume_t volume, void *opts)
|
||||
{
|
||||
struct vbdev_ocf_base **priv = ocf_volume_get_priv(volume);
|
||||
struct vbdev_ocf_base *base = vbdev_ocf_get_base_by_name(ocf_volume_get_uuid(volume)->data);
|
||||
struct vbdev_ocf_base *base;
|
||||
|
||||
if (base == NULL) {
|
||||
assert(false);
|
||||
return -EINVAL;
|
||||
if (opts) {
|
||||
base = opts;
|
||||
} else {
|
||||
base = vbdev_ocf_get_base_by_name(ocf_volume_get_uuid(volume)->data);
|
||||
if (base == NULL) {
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
|
||||
*priv = base;
|
||||
@ -167,23 +171,14 @@ vbdev_ocf_volume_submit_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *
|
||||
|
||||
io = opaque;
|
||||
io_ctx = ocf_get_io_ctx(io);
|
||||
|
||||
assert(io_ctx != NULL);
|
||||
|
||||
if (!success) {
|
||||
io_ctx->error |= 1;
|
||||
}
|
||||
|
||||
if (io_ctx->offset && bdev_io != NULL) {
|
||||
switch (bdev_io->type) {
|
||||
case SPDK_BDEV_IO_TYPE_READ:
|
||||
case SPDK_BDEV_IO_TYPE_WRITE:
|
||||
env_free(bdev_io->u.bdev.iovs);
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
break;
|
||||
}
|
||||
if (io_ctx->iovs_allocated && bdev_io != NULL) {
|
||||
env_free(bdev_io->u.bdev.iovs);
|
||||
}
|
||||
|
||||
if (io_ctx->error) {
|
||||
@ -212,6 +207,8 @@ prepare_submit(struct ocf_io *io)
|
||||
struct vbdev_ocf_qcxt *qctx;
|
||||
struct vbdev_ocf_base *base;
|
||||
ocf_queue_t q = io->io_queue;
|
||||
ocf_cache_t cache;
|
||||
struct vbdev_ocf_cache_ctx *cctx;
|
||||
int rc = 0;
|
||||
|
||||
io_ctx->rq_cnt++;
|
||||
@ -232,6 +229,14 @@ prepare_submit(struct ocf_io *io)
|
||||
return 0;
|
||||
}
|
||||
|
||||
cache = ocf_queue_get_cache(q);
|
||||
cctx = ocf_cache_get_priv(cache);
|
||||
|
||||
if (q == cctx->cleaner_queue || q == cctx->mngt_queue) {
|
||||
io_ctx->ch = base->management_channel;
|
||||
return 0;
|
||||
}
|
||||
|
||||
qctx = ocf_queue_get_priv(q);
|
||||
if (qctx == NULL) {
|
||||
return -EFAULT;
|
||||
@ -253,11 +258,6 @@ vbdev_ocf_volume_submit_flush(struct ocf_io *io)
|
||||
struct ocf_io_ctx *io_ctx = ocf_get_io_ctx(io);
|
||||
int status;
|
||||
|
||||
if (base->is_cache) {
|
||||
io->end(io, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
status = prepare_submit(io);
|
||||
if (status) {
|
||||
SPDK_ERRLOG("Preparing io failed with status=%d\n", status);
|
||||
@ -302,27 +302,41 @@ vbdev_ocf_volume_submit_io(struct ocf_io *io)
|
||||
len = io->bytes;
|
||||
offset = io_ctx->offset;
|
||||
|
||||
if (offset) {
|
||||
i = get_starting_vec(io_ctx->data->iovs, io_ctx->data->iovcnt, &offset);
|
||||
if (len < io_ctx->data->size) {
|
||||
if (io_ctx->data->iovcnt == 1) {
|
||||
if (io->dir == OCF_READ) {
|
||||
status = spdk_bdev_read(base->desc, io_ctx->ch,
|
||||
io_ctx->data->iovs[0].iov_base + offset, addr, len,
|
||||
vbdev_ocf_volume_submit_io_cb, io);
|
||||
} else if (io->dir == OCF_WRITE) {
|
||||
status = spdk_bdev_write(base->desc, io_ctx->ch,
|
||||
io_ctx->data->iovs[0].iov_base + offset, addr, len,
|
||||
vbdev_ocf_volume_submit_io_cb, io);
|
||||
}
|
||||
goto end;
|
||||
} else {
|
||||
i = get_starting_vec(io_ctx->data->iovs, io_ctx->data->iovcnt, &offset);
|
||||
|
||||
if (i < 0) {
|
||||
SPDK_ERRLOG("offset bigger than data size\n");
|
||||
vbdev_ocf_volume_submit_io_cb(NULL, false, io);
|
||||
return;
|
||||
if (i < 0) {
|
||||
SPDK_ERRLOG("offset bigger than data size\n");
|
||||
vbdev_ocf_volume_submit_io_cb(NULL, false, io);
|
||||
return;
|
||||
}
|
||||
|
||||
iovcnt = io_ctx->data->iovcnt - i;
|
||||
|
||||
io_ctx->iovs_allocated = true;
|
||||
iovs = env_malloc(sizeof(*iovs) * iovcnt, ENV_MEM_NOIO);
|
||||
|
||||
if (!iovs) {
|
||||
SPDK_ERRLOG("allocation failed\n");
|
||||
vbdev_ocf_volume_submit_io_cb(NULL, false, io);
|
||||
return;
|
||||
}
|
||||
|
||||
initialize_cpy_vector(iovs, io_ctx->data->iovcnt, &io_ctx->data->iovs[i],
|
||||
iovcnt, offset, len);
|
||||
}
|
||||
|
||||
iovcnt = io_ctx->data->iovcnt - i;
|
||||
|
||||
iovs = env_malloc(sizeof(*iovs) * iovcnt, ENV_MEM_NOIO);
|
||||
|
||||
if (!iovs) {
|
||||
SPDK_ERRLOG("allocation failed\n");
|
||||
vbdev_ocf_volume_submit_io_cb(NULL, false, io);
|
||||
return;
|
||||
}
|
||||
|
||||
initialize_cpy_vector(iovs, io_ctx->data->iovcnt, &io_ctx->data->iovs[i],
|
||||
iovcnt, offset, len);
|
||||
} else {
|
||||
iovs = io_ctx->data->iovs;
|
||||
iovcnt = io_ctx->data->iovcnt;
|
||||
@ -336,6 +350,7 @@ vbdev_ocf_volume_submit_io(struct ocf_io *io)
|
||||
iovs, iovcnt, addr, len, vbdev_ocf_volume_submit_io_cb, io);
|
||||
}
|
||||
|
||||
end:
|
||||
if (status) {
|
||||
/* TODO [ENOMEM]: implement ENOMEM handling when submitting IO to base device */
|
||||
|
||||
|
@ -49,6 +49,7 @@ struct ocf_io_ctx {
|
||||
int ref;
|
||||
int rq_cnt;
|
||||
int error;
|
||||
bool iovs_allocated;
|
||||
};
|
||||
|
||||
int vbdev_ocf_volume_init(void);
|
||||
|
@ -633,6 +633,7 @@ vbdev_passthru_register(struct spdk_bdev *bdev)
|
||||
rc = spdk_bdev_register(&pt_node->pt_bdev);
|
||||
if (rc) {
|
||||
SPDK_ERRLOG("could not register pt_bdev\n");
|
||||
spdk_bdev_module_release_bdev(&pt_node->pt_bdev);
|
||||
spdk_bdev_close(pt_node->base_desc);
|
||||
TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
|
||||
spdk_io_device_unregister(pt_node, NULL);
|
||||
|
@ -1848,6 +1848,17 @@ raid_bdev_remove_base_devices(struct raid_bdev_config *raid_cfg,
|
||||
return;
|
||||
}
|
||||
|
||||
if (raid_bdev->destroy_started) {
|
||||
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "destroying raid bdev %s is already started\n",
|
||||
raid_cfg->name);
|
||||
if (cb_fn) {
|
||||
cb_fn(cb_arg, -EALREADY);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
raid_bdev->destroy_started = true;
|
||||
|
||||
for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
|
||||
info = &raid_bdev->base_bdev_info[i];
|
||||
|
||||
|
@ -127,6 +127,9 @@ struct raid_bdev {
|
||||
|
||||
/* Set to true if destruct is called for this raid bdev */
|
||||
bool destruct_called;
|
||||
|
||||
/* Set to true if destroy of this raid bdev is started. */
|
||||
bool destroy_started;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -337,32 +337,51 @@ static const struct spdk_json_object_decoder rpc_destroy_raid_bdev_decoders[] =
|
||||
{"name", offsetof(struct rpc_destroy_raid_bdev, name), spdk_json_decode_string},
|
||||
};
|
||||
|
||||
struct rpc_destroy_raid_bdev_ctx {
|
||||
struct rpc_destroy_raid_bdev req;
|
||||
struct raid_bdev_config *raid_cfg;
|
||||
struct spdk_jsonrpc_request *request;
|
||||
};
|
||||
|
||||
/*
|
||||
* brief:
|
||||
* Since destroying raid_bdev is asynchronous operation, so this function is
|
||||
* used to check if raid bdev still exists. If raid bdev is still there it will create
|
||||
* event and check later, otherwise it will proceed with cleanup
|
||||
* params:
|
||||
* arg - pointer to raid bdev cfg
|
||||
* cb_arg - pointer to the callback context.
|
||||
* rc - return code of the destruction of the raid bdev.
|
||||
* returns:
|
||||
* none
|
||||
*/
|
||||
static void
|
||||
raid_bdev_config_destroy(void *arg)
|
||||
destroy_raid_bdev_done(void *cb_arg, int rc)
|
||||
{
|
||||
struct raid_bdev_config *raid_cfg = arg;
|
||||
struct rpc_destroy_raid_bdev_ctx *ctx = cb_arg;
|
||||
struct raid_bdev_config *raid_cfg;
|
||||
struct spdk_jsonrpc_request *request = ctx->request;
|
||||
struct spdk_json_write_ctx *w;
|
||||
|
||||
assert(raid_cfg != NULL);
|
||||
if (raid_cfg->raid_bdev != NULL) {
|
||||
/*
|
||||
* If raid bdev exists for this config, wait for raid bdev to get
|
||||
* destroyed and come back later
|
||||
*/
|
||||
spdk_thread_send_msg(spdk_get_thread(), raid_bdev_config_destroy,
|
||||
raid_cfg);
|
||||
} else {
|
||||
raid_bdev_config_cleanup(raid_cfg);
|
||||
if (rc != 0) {
|
||||
SPDK_ERRLOG("Failed to destroy raid bdev %s (%d): %s\n",
|
||||
ctx->req.name, rc, spdk_strerror(-rc));
|
||||
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
|
||||
spdk_strerror(-rc));
|
||||
goto exit;
|
||||
}
|
||||
|
||||
raid_cfg = ctx->raid_cfg;
|
||||
assert(raid_cfg->raid_bdev == NULL);
|
||||
|
||||
raid_bdev_config_cleanup(raid_cfg);
|
||||
|
||||
w = spdk_jsonrpc_begin_result(request);
|
||||
if (w == NULL) {
|
||||
goto exit;
|
||||
}
|
||||
|
||||
spdk_json_write_bool(w, true);
|
||||
spdk_jsonrpc_end_result(request, w);
|
||||
exit:
|
||||
free_rpc_destroy_raid_bdev(&ctx->req);
|
||||
free(ctx);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -379,42 +398,39 @@ raid_bdev_config_destroy(void *arg)
|
||||
static void
|
||||
spdk_rpc_destroy_raid_bdev(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
|
||||
{
|
||||
struct rpc_destroy_raid_bdev req = {};
|
||||
struct spdk_json_write_ctx *w;
|
||||
struct raid_bdev_config *raid_cfg = NULL;
|
||||
struct rpc_destroy_raid_bdev_ctx *ctx;
|
||||
|
||||
ctx = calloc(1, sizeof(*ctx));
|
||||
if (!ctx) {
|
||||
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
|
||||
spdk_strerror(ENOMEM));
|
||||
return;
|
||||
}
|
||||
|
||||
if (spdk_json_decode_object(params, rpc_destroy_raid_bdev_decoders,
|
||||
SPDK_COUNTOF(rpc_destroy_raid_bdev_decoders),
|
||||
&req)) {
|
||||
&ctx->req)) {
|
||||
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
|
||||
"Invalid parameters");
|
||||
goto invalid;
|
||||
}
|
||||
|
||||
raid_cfg = raid_bdev_config_find_by_name(req.name);
|
||||
if (raid_cfg == NULL) {
|
||||
ctx->raid_cfg = raid_bdev_config_find_by_name(ctx->req.name);
|
||||
if (ctx->raid_cfg == NULL) {
|
||||
spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
|
||||
"raid bdev %s is not found in config", req.name);
|
||||
"raid bdev %s is not found in config", ctx->req.name);
|
||||
goto invalid;
|
||||
}
|
||||
|
||||
ctx->request = request;
|
||||
|
||||
/* Remove all the base bdevs from this raid bdev before destroying the raid bdev */
|
||||
raid_bdev_remove_base_devices(raid_cfg, NULL, NULL);
|
||||
raid_bdev_remove_base_devices(ctx->raid_cfg, destroy_raid_bdev_done, ctx);
|
||||
|
||||
raid_bdev_config_destroy(raid_cfg);
|
||||
|
||||
free_rpc_destroy_raid_bdev(&req);
|
||||
|
||||
w = spdk_jsonrpc_begin_result(request);
|
||||
if (w == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
spdk_json_write_bool(w, true);
|
||||
spdk_jsonrpc_end_result(request, w);
|
||||
return;
|
||||
|
||||
invalid:
|
||||
free_rpc_destroy_raid_bdev(&req);
|
||||
free_rpc_destroy_raid_bdev(&ctx->req);
|
||||
free(ctx);
|
||||
}
|
||||
SPDK_RPC_REGISTER("destroy_raid_bdev", spdk_rpc_destroy_raid_bdev, SPDK_RPC_RUNTIME)
|
||||
|
@ -49,6 +49,12 @@
|
||||
|
||||
#define BLOB_CRC32C_INITIAL 0xffffffffUL
|
||||
|
||||
#ifdef SPDK_ENABLE_SNAPSHOT_DELETION
|
||||
bool g_delete_snapshot_enabled = true;
|
||||
#else
|
||||
bool g_delete_snapshot_enabled = false;
|
||||
#endif
|
||||
|
||||
static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
|
||||
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
|
||||
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
|
||||
@ -2544,6 +2550,8 @@ struct spdk_bs_load_ctx {
|
||||
spdk_bs_sequence_t *seq;
|
||||
spdk_blob_op_with_handle_complete iter_cb_fn;
|
||||
void *iter_cb_arg;
|
||||
struct spdk_blob *blob;
|
||||
spdk_blob_id blobid;
|
||||
};
|
||||
|
||||
static void
|
||||
@ -2693,20 +2701,154 @@ _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
|
||||
_spdk_blob_set_thin_provision(struct spdk_blob *blob)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = arg;
|
||||
_spdk_blob_verify_md_op(blob);
|
||||
blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
|
||||
blob->state = SPDK_BLOB_STATE_DIRTY;
|
||||
}
|
||||
|
||||
if (bserrno == 0) {
|
||||
if (ctx->iter_cb_fn) {
|
||||
ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
|
||||
}
|
||||
_spdk_bs_blob_list_add(blob);
|
||||
spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
|
||||
static void _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = cb_arg;
|
||||
spdk_blob_id id;
|
||||
int64_t page_num;
|
||||
|
||||
/* Iterate to next blob (we can't use spdk_bs_iter_next function as our
|
||||
* last blob has been removed */
|
||||
page_num = _spdk_bs_blobid_to_page(ctx->blobid);
|
||||
page_num++;
|
||||
page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
|
||||
if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
|
||||
_spdk_bs_load_iter(ctx, NULL, -ENOENT);
|
||||
return;
|
||||
}
|
||||
|
||||
if (bserrno == -ENOENT) {
|
||||
id = _spdk_bs_page_to_blobid(page_num);
|
||||
|
||||
spdk_bs_open_blob(ctx->bs, id, _spdk_bs_load_iter, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno != 0) {
|
||||
SPDK_ERRLOG("Failed to close corrupted blob\n");
|
||||
spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
spdk_bs_delete_blob(ctx->bs, ctx->blobid, _spdk_bs_delete_corrupted_blob_cpl, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_corrupted_blob(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = cb_arg;
|
||||
uint64_t i;
|
||||
|
||||
if (bserrno != 0) {
|
||||
SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
|
||||
spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Snapshot and clone have the same copy of cluster map at this point.
|
||||
* Let's clear cluster map for snpashot now so that it won't be cleared
|
||||
* for clone later when we remove snapshot. Also set thin provision to
|
||||
* pass data corruption check */
|
||||
for (i = 0; i < ctx->blob->active.num_clusters; i++) {
|
||||
ctx->blob->active.clusters[i] = 0;
|
||||
}
|
||||
|
||||
ctx->blob->md_ro = false;
|
||||
|
||||
_spdk_blob_set_thin_provision(ctx->blob);
|
||||
|
||||
ctx->blobid = ctx->blob->id;
|
||||
|
||||
spdk_blob_close(ctx->blob, _spdk_bs_delete_corrupted_close_cb, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_update_corrupted_blob(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno != 0) {
|
||||
SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
|
||||
spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
ctx->blob->md_ro = false;
|
||||
_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
|
||||
spdk_blob_set_read_only(ctx->blob);
|
||||
|
||||
if (ctx->iter_cb_fn) {
|
||||
ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
|
||||
}
|
||||
_spdk_bs_blob_list_add(ctx->blob);
|
||||
|
||||
spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno != 0) {
|
||||
SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
|
||||
spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
if (blob->parent_id == ctx->blob->id) {
|
||||
/* Power failure occured before updating clone - keep snapshot */
|
||||
spdk_blob_close(blob, _spdk_bs_update_corrupted_blob, ctx);
|
||||
} else {
|
||||
/* Power failure occured after updating clone - remove snapshot */
|
||||
spdk_blob_close(blob, _spdk_bs_delete_corrupted_blob, ctx);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = arg;
|
||||
const void *value;
|
||||
size_t len;
|
||||
int rc = 0;
|
||||
|
||||
if (bserrno == 0) {
|
||||
/* Examine blob if it is corrupted after power failure. Fix
|
||||
* the ones that can be fixed and remove any other corrupted
|
||||
* ones. If it is not corrupted just process it */
|
||||
rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
|
||||
if (rc != 0) {
|
||||
/* Not corrupted - process it and continue with iterating through blobs */
|
||||
if (ctx->iter_cb_fn) {
|
||||
ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
|
||||
}
|
||||
_spdk_bs_blob_list_add(blob);
|
||||
spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
assert(len == sizeof(spdk_blob_id));
|
||||
|
||||
ctx->blob = blob;
|
||||
|
||||
/* Open clone to check if we are able to fix this blob or should we remove it */
|
||||
spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, _spdk_bs_examine_clone, ctx);
|
||||
return;
|
||||
} else if (bserrno == -ENOENT) {
|
||||
bserrno = 0;
|
||||
} else {
|
||||
/*
|
||||
@ -4123,14 +4265,6 @@ _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_blob_set_thin_provision(struct spdk_blob *blob)
|
||||
{
|
||||
_spdk_blob_verify_md_op(blob);
|
||||
blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
|
||||
blob->state = SPDK_BLOB_STATE_DIRTY;
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_create_blob(struct spdk_blob_store *bs,
|
||||
const struct spdk_blob_opts *opts,
|
||||
@ -4371,7 +4505,7 @@ _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
|
||||
|
||||
if (bserrno != 0) {
|
||||
_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
|
||||
_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
|
||||
_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -5010,14 +5144,6 @@ spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_f
|
||||
|
||||
/* START spdk_bs_delete_blob */
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_ebusy_close_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
spdk_bs_sequence_t *seq = cb_arg;
|
||||
|
||||
spdk_bs_sequence_finish(seq, -EBUSY);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
@ -5052,6 +5178,301 @@ _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
|
||||
}
|
||||
|
||||
struct delete_snapshot_ctx {
|
||||
struct spdk_blob_list *parent_snapshot_entry;
|
||||
struct spdk_blob *snapshot;
|
||||
bool snapshot_md_ro;
|
||||
struct spdk_blob *clone;
|
||||
bool clone_md_ro;
|
||||
spdk_blob_op_with_handle_complete cb_fn;
|
||||
void *cb_arg;
|
||||
int bserrno;
|
||||
};
|
||||
|
||||
static void
|
||||
_spdk_delete_blob_cleanup_finish(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno != 0) {
|
||||
SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
|
||||
}
|
||||
|
||||
assert(ctx != NULL);
|
||||
|
||||
if (bserrno != 0 && ctx->bserrno == 0) {
|
||||
ctx->bserrno = bserrno;
|
||||
}
|
||||
|
||||
ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
|
||||
free(ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno != 0) {
|
||||
ctx->bserrno = bserrno;
|
||||
SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
|
||||
}
|
||||
|
||||
/* open_ref == 1 menas that only deletion context has opened this snapshot
|
||||
* open_ref == 2 menas that clone has opened this snapshot as well,
|
||||
* so we have to add it back to the blobs list */
|
||||
if (ctx->snapshot->open_ref == 2) {
|
||||
TAILQ_INSERT_HEAD(&ctx->snapshot->bs->blobs, ctx->snapshot, link);
|
||||
}
|
||||
|
||||
ctx->snapshot->locked_operation_in_progress = false;
|
||||
ctx->snapshot->md_ro = ctx->snapshot_md_ro;
|
||||
|
||||
spdk_blob_close(ctx->snapshot, _spdk_delete_blob_cleanup_finish, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
|
||||
ctx->clone->locked_operation_in_progress = false;
|
||||
ctx->clone->md_ro = ctx->clone_md_ro;
|
||||
|
||||
spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno) {
|
||||
ctx->bserrno = bserrno;
|
||||
_spdk_delete_snapshot_cleanup_clone(ctx, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
ctx->clone->locked_operation_in_progress = false;
|
||||
spdk_blob_close(ctx->clone, _spdk_delete_blob_cleanup_finish, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
struct spdk_blob_list *parent_snapshot_entry = NULL;
|
||||
struct spdk_blob_list *snapshot_entry = NULL;
|
||||
struct spdk_blob_list *clone_entry = NULL;
|
||||
struct spdk_blob_list *snapshot_clone_entry = NULL;
|
||||
|
||||
if (bserrno) {
|
||||
SPDK_ERRLOG("Failed to sync MD on blob\n");
|
||||
ctx->bserrno = bserrno;
|
||||
_spdk_delete_snapshot_cleanup_clone(ctx, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Get snapshot entry for the snapshot we want to remove */
|
||||
snapshot_entry = _spdk_bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
|
||||
|
||||
assert(snapshot_entry != NULL);
|
||||
|
||||
/* Remove clone entry in this snapshot (at this point there can be only one clone) */
|
||||
clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
|
||||
assert(clone_entry != NULL);
|
||||
TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
|
||||
snapshot_entry->clone_count--;
|
||||
assert(TAILQ_EMPTY(&snapshot_entry->clones));
|
||||
|
||||
if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) {
|
||||
/* This snapshot is at the same time a clone of another snapshot - we need to
|
||||
* update parent snapshot (remove current clone, add new one inherited from
|
||||
* the snapshot that is being removed) */
|
||||
|
||||
/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
|
||||
* snapshot that we are removing */
|
||||
_spdk_blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
|
||||
&snapshot_clone_entry);
|
||||
|
||||
/* Switch clone entry in parent snapshot */
|
||||
TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
|
||||
TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
|
||||
free(snapshot_clone_entry);
|
||||
} else {
|
||||
/* No parent snapshot - just remove clone entry */
|
||||
free(clone_entry);
|
||||
}
|
||||
|
||||
/* Restore md_ro flags */
|
||||
ctx->clone->md_ro = ctx->clone_md_ro;
|
||||
ctx->snapshot->md_ro = ctx->snapshot_md_ro;
|
||||
|
||||
_spdk_blob_unfreeze_io(ctx->clone, _spdk_delete_snapshot_unfreeze_cpl, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
uint64_t i;
|
||||
|
||||
ctx->snapshot->md_ro = false;
|
||||
|
||||
if (bserrno) {
|
||||
SPDK_ERRLOG("Failed to sync MD on clone\n");
|
||||
ctx->bserrno = bserrno;
|
||||
|
||||
/* Restore snapshot to previous state */
|
||||
bserrno = _spdk_blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
|
||||
if (bserrno != 0) {
|
||||
_spdk_delete_snapshot_cleanup_clone(ctx, bserrno);
|
||||
return;
|
||||
}
|
||||
|
||||
spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_cleanup_clone, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Clear cluster map entries for snapshot */
|
||||
for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
|
||||
if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
|
||||
ctx->snapshot->active.clusters[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
|
||||
|
||||
if (ctx->parent_snapshot_entry != NULL) {
|
||||
ctx->snapshot->back_bs_dev = NULL;
|
||||
}
|
||||
|
||||
spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_cpl, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
uint64_t i;
|
||||
|
||||
/* Temporarily override md_ro flag for clone for MD modification */
|
||||
ctx->clone_md_ro = ctx->clone->md_ro;
|
||||
ctx->clone->md_ro = false;
|
||||
|
||||
if (bserrno) {
|
||||
SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
|
||||
ctx->bserrno = bserrno;
|
||||
_spdk_delete_snapshot_cleanup_clone(ctx, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Copy snapshot map to clone map (only unallocated clusters in clone) */
|
||||
for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
|
||||
if (ctx->clone->active.clusters[i] == 0) {
|
||||
ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
|
||||
}
|
||||
}
|
||||
|
||||
/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
|
||||
ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev);
|
||||
|
||||
/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
|
||||
if (ctx->parent_snapshot_entry != NULL) {
|
||||
/* ...to parent snapshot */
|
||||
ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
|
||||
ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
|
||||
_spdk_blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
|
||||
sizeof(spdk_blob_id),
|
||||
true);
|
||||
} else {
|
||||
/* ...to blobid invalid and zeroes dev */
|
||||
ctx->clone->parent_id = SPDK_BLOBID_INVALID;
|
||||
ctx->clone->back_bs_dev = spdk_bs_create_zeroes_dev();
|
||||
_spdk_blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
|
||||
}
|
||||
|
||||
spdk_blob_sync_md(ctx->clone, _spdk_delete_snapshot_sync_clone_cpl, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno) {
|
||||
SPDK_ERRLOG("Failed to freeze I/O on clone\n");
|
||||
ctx->bserrno = bserrno;
|
||||
_spdk_delete_snapshot_cleanup_clone(ctx, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Temporarily override md_ro flag for snapshot for MD modification */
|
||||
ctx->snapshot_md_ro = ctx->snapshot->md_ro;
|
||||
ctx->snapshot->md_ro = false;
|
||||
|
||||
/* Mark blob as pending for removal for power failure safety, use clone id for recovery */
|
||||
ctx->bserrno = _spdk_blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
|
||||
sizeof(spdk_blob_id), true);
|
||||
if (ctx->bserrno != 0) {
|
||||
_spdk_delete_snapshot_cleanup_clone(ctx, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_xattr_cpl, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
|
||||
{
|
||||
struct delete_snapshot_ctx *ctx = cb_arg;
|
||||
|
||||
if (bserrno) {
|
||||
SPDK_ERRLOG("Failed to open clone\n");
|
||||
ctx->bserrno = bserrno;
|
||||
_spdk_delete_snapshot_cleanup_snapshot(ctx, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
ctx->clone = clone;
|
||||
|
||||
if (clone->locked_operation_in_progress) {
|
||||
SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress on its clone\n");
|
||||
ctx->bserrno = -EBUSY;
|
||||
spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
clone->locked_operation_in_progress = true;
|
||||
|
||||
_spdk_blob_freeze_io(clone, _spdk_delete_snapshot_freeze_io_cb, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
|
||||
{
|
||||
struct spdk_blob_list *snapshot_entry = NULL;
|
||||
struct spdk_blob_list *clone_entry = NULL;
|
||||
struct spdk_blob_list *snapshot_clone_entry = NULL;
|
||||
|
||||
/* Get snapshot entry for the snapshot we want to remove */
|
||||
snapshot_entry = _spdk_bs_get_snapshot_entry(snapshot->bs, snapshot->id);
|
||||
|
||||
assert(snapshot_entry != NULL);
|
||||
|
||||
/* Get clone of the snapshot (at this point there can be only one clone) */
|
||||
clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
|
||||
assert(snapshot_entry->clone_count == 1);
|
||||
assert(clone_entry != NULL);
|
||||
|
||||
/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
|
||||
* snapshot that we are removing */
|
||||
_spdk_blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
|
||||
&snapshot_clone_entry);
|
||||
|
||||
spdk_bs_open_blob(snapshot->bs, clone_entry->id, _spdk_delete_snapshot_open_clone_cb, ctx);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
|
||||
{
|
||||
@ -5082,32 +5503,72 @@ _spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
|
||||
}
|
||||
|
||||
static int
|
||||
_spdk_bs_is_blob_deletable(struct spdk_blob *blob)
|
||||
_spdk_bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
|
||||
{
|
||||
struct spdk_blob_list *snapshot_entry = NULL;
|
||||
|
||||
if (blob->open_ref > 1) {
|
||||
/* Someone has this blob open (besides this delete context). */
|
||||
return -EBUSY;
|
||||
}
|
||||
struct spdk_blob_list *clone_entry = NULL;
|
||||
struct spdk_blob *clone = NULL;
|
||||
bool has_one_clone = false;
|
||||
|
||||
/* Check if this is a snapshot with clones */
|
||||
snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
|
||||
if (snapshot_entry != NULL) {
|
||||
/* If snapshot have clones, we cannot remove it */
|
||||
if (!TAILQ_EMPTY(&snapshot_entry->clones)) {
|
||||
if (snapshot_entry->clone_count > 0 && !g_delete_snapshot_enabled) {
|
||||
SPDK_ERRLOG("Cannot remove snapshot with clones\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (snapshot_entry->clone_count > 1) {
|
||||
SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
|
||||
return -EBUSY;
|
||||
} else if (snapshot_entry->clone_count == 1) {
|
||||
has_one_clone = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check if someone has this blob open (besides this delete context):
|
||||
* - open_ref = 1 - only this context opened blob, so it is ok to remove it
|
||||
* - open_ref <= 2 && has_one_clone = true - clone is holding snapshot
|
||||
* and that is ok, because we will update it accordingly */
|
||||
if (blob->open_ref <= 2 && has_one_clone) {
|
||||
clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
|
||||
assert(clone_entry != NULL);
|
||||
clone = _spdk_blob_lookup(blob->bs, clone_entry->id);
|
||||
|
||||
if (blob->open_ref == 2 && clone == NULL) {
|
||||
/* Clone is closed and someone else opened this blob */
|
||||
SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
*update_clone = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (blob->open_ref > 1) {
|
||||
SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
assert(has_one_clone == false);
|
||||
*update_clone = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
|
||||
{
|
||||
spdk_bs_sequence_t *seq = cb_arg;
|
||||
|
||||
spdk_bs_sequence_finish(seq, -ENOMEM);
|
||||
}
|
||||
|
||||
static void
|
||||
_spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
|
||||
{
|
||||
spdk_bs_sequence_t *seq = cb_arg;
|
||||
struct delete_snapshot_ctx *ctx;
|
||||
bool update_clone = false;
|
||||
|
||||
if (bserrno != 0) {
|
||||
spdk_bs_sequence_finish(seq, bserrno);
|
||||
@ -5116,17 +5577,27 @@ _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
|
||||
|
||||
_spdk_blob_verify_md_op(blob);
|
||||
|
||||
bserrno = _spdk_bs_is_blob_deletable(blob);
|
||||
if (bserrno) {
|
||||
spdk_blob_close(blob, _spdk_bs_delete_ebusy_close_cpl, seq);
|
||||
ctx = calloc(1, sizeof(*ctx));
|
||||
if (ctx == NULL) {
|
||||
spdk_blob_close(blob, _spdk_bs_delete_enomem_close_cpl, seq);
|
||||
return;
|
||||
}
|
||||
|
||||
_spdk_bs_blob_list_remove(blob);
|
||||
ctx->snapshot = blob;
|
||||
ctx->cb_fn = _spdk_bs_delete_blob_finish;
|
||||
ctx->cb_arg = seq;
|
||||
|
||||
/* Check if blob can be removed and if it is a snapshot with clone on top of it */
|
||||
ctx->bserrno = _spdk_bs_is_blob_deletable(blob, &update_clone);
|
||||
if (ctx->bserrno) {
|
||||
spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
if (blob->locked_operation_in_progress) {
|
||||
SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress\n");
|
||||
spdk_blob_close(blob, _spdk_bs_delete_ebusy_close_cpl, seq);
|
||||
ctx->bserrno = -EBUSY;
|
||||
spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -5138,7 +5609,15 @@ _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
|
||||
*/
|
||||
TAILQ_REMOVE(&blob->bs->blobs, blob, link);
|
||||
|
||||
_spdk_bs_delete_blob_finish(seq, blob, 0);
|
||||
if (update_clone) {
|
||||
/* This blob is a snapshot with active clone - update clone first */
|
||||
_spdk_update_clone_on_snapshot_deletion(blob, ctx);
|
||||
} else {
|
||||
/* This blob does not have any clones - just remove it */
|
||||
_spdk_bs_blob_list_remove(blob);
|
||||
_spdk_bs_delete_blob_finish(seq, blob, 0);
|
||||
free(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -215,6 +215,7 @@ enum spdk_blob_op_type {
|
||||
|
||||
#define BLOB_SNAPSHOT "SNAP"
|
||||
#define SNAPSHOT_IN_PROGRESS "SNAPTMP"
|
||||
#define SNAPSHOT_PENDING_REMOVAL "SNAPRM"
|
||||
|
||||
struct spdk_blob_bs_dev {
|
||||
struct spdk_bs_dev bs_dev;
|
||||
|
@ -77,6 +77,7 @@ struct spdk_file {
|
||||
bool is_deleted;
|
||||
bool open_for_writing;
|
||||
uint64_t length_flushed;
|
||||
uint64_t length_xattr;
|
||||
uint64_t append_pos;
|
||||
uint64_t seq_byte_count;
|
||||
uint64_t next_seq_offset;
|
||||
@ -168,9 +169,16 @@ struct spdk_fs_cb_args {
|
||||
uint64_t offset;
|
||||
} readahead;
|
||||
struct {
|
||||
/* offset of the file when the sync request was made */
|
||||
uint64_t offset;
|
||||
TAILQ_ENTRY(spdk_fs_request) tailq;
|
||||
bool xattr_in_progress;
|
||||
/* length written to the xattr for this file - this should
|
||||
* always be the same as the offset if only one thread is
|
||||
* writing to the file, but could differ if multiple threads
|
||||
* are appending
|
||||
*/
|
||||
uint64_t length;
|
||||
} sync;
|
||||
struct {
|
||||
uint32_t num_clusters;
|
||||
@ -667,6 +675,7 @@ iter_cb(void *ctx, struct spdk_blob *blob, int rc)
|
||||
f->blobid = spdk_blob_get_id(blob);
|
||||
f->length = *length;
|
||||
f->length_flushed = *length;
|
||||
f->length_xattr = *length;
|
||||
f->append_pos = *length;
|
||||
SPDK_DEBUGLOG(SPDK_LOG_BLOBFS, "added file %s length=%ju\n", f->name, f->length);
|
||||
} else {
|
||||
@ -1955,6 +1964,7 @@ __file_cache_finish_sync(void *ctx, int bserrno)
|
||||
pthread_spin_lock(&file->lock);
|
||||
sync_req = TAILQ_FIRST(&file->sync_requests);
|
||||
sync_args = &sync_req->args;
|
||||
file->length_xattr = sync_args->op.sync.length;
|
||||
assert(sync_args->op.sync.offset <= file->length_flushed);
|
||||
BLOBFS_TRACE(file, "sync done offset=%jx\n", sync_args->op.sync.offset);
|
||||
TAILQ_REMOVE(&file->sync_requests, sync_req, args.op.sync.tailq);
|
||||
@ -1984,6 +1994,7 @@ __check_sync_reqs(struct spdk_file *file)
|
||||
if (sync_req != NULL && !sync_req->args.op.sync.xattr_in_progress) {
|
||||
BLOBFS_TRACE(file, "set xattr length 0x%jx\n", file->length_flushed);
|
||||
sync_req->args.op.sync.xattr_in_progress = true;
|
||||
sync_req->args.op.sync.length = file->length_flushed;
|
||||
spdk_blob_set_xattr(file->blob, "length", &file->length_flushed,
|
||||
sizeof(file->length_flushed));
|
||||
|
||||
@ -2042,11 +2053,15 @@ __file_flush(void *ctx)
|
||||
|
||||
pthread_spin_lock(&file->lock);
|
||||
next = spdk_tree_find_buffer(file->tree, file->length_flushed);
|
||||
if (next == NULL || next->in_progress) {
|
||||
if (next == NULL || next->in_progress ||
|
||||
((next->bytes_filled < next->buf_size) && TAILQ_EMPTY(&file->sync_requests))) {
|
||||
/*
|
||||
* There is either no data to flush, or a flush I/O is already in
|
||||
* progress. So return immediately - if a flush I/O is in
|
||||
* progress we will flush more data after that is completed.
|
||||
* There is either no data to flush, a flush I/O is already in
|
||||
* progress, or the next buffer is partially filled but there's no
|
||||
* outstanding request to sync it.
|
||||
* So return immediately - if a flush I/O is in progress we will flush
|
||||
* more data after that is completed, or a partial buffer will get flushed
|
||||
* when it is either filled or the file is synced.
|
||||
*/
|
||||
free_fs_request(req);
|
||||
if (next == NULL) {
|
||||
@ -2074,6 +2089,11 @@ __file_flush(void *ctx)
|
||||
if (length == 0) {
|
||||
free_fs_request(req);
|
||||
pthread_spin_unlock(&file->lock);
|
||||
/*
|
||||
* There is no data to flush, but we still need to check for any
|
||||
* outstanding sync requests to make sure metadata gets updated.
|
||||
*/
|
||||
__check_sync_reqs(file);
|
||||
return;
|
||||
}
|
||||
args->op.flush.length = length;
|
||||
@ -2436,6 +2456,8 @@ spdk_file_read(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
|
||||
if (length > (final_offset - offset)) {
|
||||
length = final_offset - offset;
|
||||
}
|
||||
|
||||
sub_reads++;
|
||||
rc = __file_read(file, payload, offset, length, channel);
|
||||
if (rc == 0) {
|
||||
final_length += length;
|
||||
@ -2444,7 +2466,6 @@ spdk_file_read(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx,
|
||||
}
|
||||
payload += length;
|
||||
offset += length;
|
||||
sub_reads++;
|
||||
}
|
||||
pthread_spin_unlock(&file->lock);
|
||||
while (sub_reads-- > 0) {
|
||||
@ -2469,8 +2490,8 @@ _file_sync(struct spdk_file *file, struct spdk_fs_channel *channel,
|
||||
BLOBFS_TRACE(file, "offset=%jx\n", file->append_pos);
|
||||
|
||||
pthread_spin_lock(&file->lock);
|
||||
if (file->append_pos <= file->length_flushed) {
|
||||
BLOBFS_TRACE(file, "done - no data to flush\n");
|
||||
if (file->append_pos <= file->length_xattr) {
|
||||
BLOBFS_TRACE(file, "done - file already synced\n");
|
||||
pthread_spin_unlock(&file->lock);
|
||||
cb_fn(cb_arg, 0);
|
||||
return;
|
||||
@ -2639,7 +2660,7 @@ spdk_file_close(struct spdk_file *file, struct spdk_fs_thread_ctx *ctx)
|
||||
args->file = file;
|
||||
args->sem = &channel->sem;
|
||||
args->fn.file_op = __wake_caller;
|
||||
args->arg = req;
|
||||
args->arg = args;
|
||||
channel->send_request(__file_close, req);
|
||||
sem_wait(&channel->sem);
|
||||
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include "env_internal.h"
|
||||
|
||||
#include <rte_config.h>
|
||||
#include <rte_memory.h>
|
||||
#include <rte_eal_memconfig.h>
|
||||
|
||||
#include "spdk_internal/assert.h"
|
||||
|
@@ -623,7 +623,7 @@ spdk_app_start(struct spdk_app_opts *opts, spdk_msg_fn start_fn,
 		goto app_start_setup_conf_err;
 	}
 
-	spdk_log_open();
+	spdk_log_open(opts->log);
 	SPDK_NOTICELOG("Total cores available: %d\n", spdk_env_get_core_count());
 
 	/*
@@ -1452,6 +1452,10 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
 		"max_srq_depth", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_srq_depth),
 		spdk_json_decode_uint32, true
 	},
+	{
+		"no_srq", offsetof(struct nvmf_rpc_create_transport_ctx, opts.no_srq),
+		spdk_json_decode_bool, true
+	},
 };
 
 static void
@@ -1585,6 +1589,10 @@ dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *t
 	spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
 	spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
 	spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
+	if (type == SPDK_NVME_TRANSPORT_RDMA) {
+		spdk_json_write_named_uint32(w, "max_srq_depth", opts->max_srq_depth);
+		spdk_json_write_named_bool(w, "no_srq", opts->no_srq);
+	}
 
 	spdk_json_write_object_end(w);
 }
@ -402,7 +402,7 @@ ftl_anm_unregister_device(struct spdk_ftl_dev *dev)
|
||||
|
||||
pthread_mutex_lock(&g_anm.lock);
|
||||
ctrlr = ftl_anm_find_ctrlr(&g_anm, dev->ctrlr);
|
||||
|
||||
assert(ctrlr != NULL);
|
||||
pthread_mutex_lock(&ctrlr->lock);
|
||||
|
||||
LIST_FOREACH_SAFE(poller, &ctrlr->pollers, list_entry, temp_poller) {
|
||||
|
@ -368,6 +368,7 @@ spdk_iscsi_conn_free_pdu(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pd
|
||||
conn->data_in_cnt--;
|
||||
spdk_iscsi_task_put(spdk_iscsi_task_get_primary(pdu->task));
|
||||
}
|
||||
spdk_iscsi_conn_handle_queued_datain_tasks(conn);
|
||||
}
|
||||
} else if (pdu->bhs.opcode == ISCSI_OP_SCSI_RSP &&
|
||||
pdu->task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
|
||||
@ -678,7 +679,13 @@ _iscsi_conn_remove_lun(void *arg1, void *arg2)
|
||||
|
||||
spdk_clear_all_transfer_task(conn, lun, NULL);
|
||||
TAILQ_FOREACH_SAFE(pdu, &conn->write_pdu_list, tailq, tmp_pdu) {
|
||||
if (pdu->task && (lun == pdu->task->scsi.lun)) {
|
||||
/* If the pdu's LUN matches the LUN that was removed, free this
|
||||
* PDU immediately. If the pdu's LUN is NULL, then we know
|
||||
* the datain handling code already detected the hot removal,
|
||||
* so we can free that PDU as well.
|
||||
*/
|
||||
if (pdu->task &&
|
||||
(lun == pdu->task->scsi.lun || NULL == pdu->task->scsi.lun)) {
|
||||
TAILQ_REMOVE(&conn->write_pdu_list, pdu, tailq);
|
||||
spdk_iscsi_conn_free_pdu(conn, pdu);
|
||||
}
|
||||
@ -1061,8 +1068,6 @@ process_read_task_completion(struct spdk_iscsi_conn *conn,
|
||||
spdk_iscsi_task_put(task);
|
||||
}
|
||||
process_completed_read_subtask_list(conn, primary);
|
||||
|
||||
spdk_iscsi_conn_handle_queued_datain_tasks(conn);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -1309,6 +1309,7 @@ iscsi_parse_globals(void)
|
||||
if (rc != 0) {
|
||||
SPDK_ERRLOG("spdk_initialize_all_pools() failed\n");
|
||||
free(g_spdk_iscsi.session);
|
||||
g_spdk_iscsi.session = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -1316,6 +1317,7 @@ iscsi_parse_globals(void)
|
||||
if (rc < 0) {
|
||||
SPDK_ERRLOG("spdk_initialize_iscsi_conns() failed\n");
|
||||
free(g_spdk_iscsi.session);
|
||||
g_spdk_iscsi.session = NULL;
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -50,16 +50,24 @@ static const char *const spdk_level_names[] = {
|
||||
|
||||
#define MAX_TMPBUF 1024
|
||||
|
||||
static logfunc *g_log = NULL;
|
||||
|
||||
void
|
||||
spdk_log_open(void)
|
||||
spdk_log_open(logfunc *logf)
|
||||
{
|
||||
openlog("spdk", LOG_PID, LOG_LOCAL7);
|
||||
if (logf) {
|
||||
g_log = logf;
|
||||
} else {
|
||||
openlog("spdk", LOG_PID, LOG_LOCAL7);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
spdk_log_close(void)
|
||||
{
|
||||
closelog();
|
||||
if (!g_log) {
|
||||
closelog();
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef SPDK_LOG_BACKTRACE_LVL
|
||||
@ -126,20 +134,25 @@ spdk_log(enum spdk_log_level level, const char *file, const int line, const char
|
||||
return;
|
||||
}
|
||||
|
||||
va_start(ap, format);
|
||||
if (g_log) {
|
||||
g_log(level, file, line, func, format);
|
||||
|
||||
vsnprintf(buf, sizeof(buf), format, ap);
|
||||
} else {
|
||||
va_start(ap, format);
|
||||
|
||||
if (level <= g_spdk_log_print_level) {
|
||||
fprintf(stderr, "%s:%4d:%s: *%s*: %s", file, line, func, spdk_level_names[level], buf);
|
||||
spdk_log_unwind_stack(stderr, level);
|
||||
vsnprintf(buf, sizeof(buf), format, ap);
|
||||
|
||||
if (level <= g_spdk_log_print_level) {
|
||||
fprintf(stderr, "%s:%4d:%s: *%s*: %s", file, line, func, spdk_level_names[level], buf);
|
||||
spdk_log_unwind_stack(stderr, level);
|
||||
}
|
||||
|
||||
if (level <= g_spdk_log_level) {
|
||||
syslog(severity, "%s:%4d:%s: *%s*: %s", file, line, func, spdk_level_names[level], buf);
|
||||
}
|
||||
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
if (level <= g_spdk_log_level) {
|
||||
syslog(severity, "%s:%4d:%s: *%s*: %s", file, line, func, spdk_level_names[level], buf);
|
||||
}
|
||||
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -881,6 +881,7 @@ nvme_allocate_request(struct spdk_nvme_qpair *qpair,
|
||||
req->payload_size = payload_size;
|
||||
req->qpair = qpair;
|
||||
req->pid = g_spdk_nvme_pid;
|
||||
req->submit_tick = 0;
|
||||
|
||||
return req;
|
||||
}
|
||||
|
@ -1137,7 +1137,6 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
|
||||
struct ibv_mr *mr;
|
||||
uint32_t length;
|
||||
uint64_t requested_size;
|
||||
uint32_t remaining_payload;
|
||||
void *virt_addr;
|
||||
int rc, i;
|
||||
|
||||
@ -1147,48 +1146,42 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
|
||||
assert(req->payload.next_sge_fn != NULL);
|
||||
req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
|
||||
|
||||
remaining_payload = req->payload_size;
|
||||
rdma_req->send_wr.num_sge = 1;
|
||||
|
||||
do {
|
||||
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
|
||||
if (rc) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (length > remaining_payload) {
|
||||
length = remaining_payload;
|
||||
}
|
||||
|
||||
requested_size = length;
|
||||
mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)virt_addr,
|
||||
&requested_size);
|
||||
if (mr == NULL || requested_size < length) {
|
||||
for (i = 1; i < rdma_req->send_wr.num_sge; i++) {
|
||||
rdma_req->send_sgl[i].addr = 0;
|
||||
rdma_req->send_sgl[i].length = 0;
|
||||
rdma_req->send_sgl[i].lkey = 0;
|
||||
}
|
||||
|
||||
if (mr) {
|
||||
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
rdma_req->send_sgl[rdma_req->send_wr.num_sge].addr = (uint64_t)virt_addr;
|
||||
rdma_req->send_sgl[rdma_req->send_wr.num_sge].length = length;
|
||||
rdma_req->send_sgl[rdma_req->send_wr.num_sge].lkey = mr->lkey;
|
||||
rdma_req->send_wr.num_sge++;
|
||||
|
||||
remaining_payload -= length;
|
||||
} while (remaining_payload && rdma_req->send_wr.num_sge < (int64_t)rqpair->max_send_sge);
|
||||
|
||||
if (remaining_payload) {
|
||||
SPDK_ERRLOG("Unable to prepare request. Too many SGL elements\n");
|
||||
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
|
||||
if (rc) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (length < req->payload_size) {
|
||||
SPDK_DEBUGLOG(SPDK_LOG_NVME, "Inline SGL request split so sending separately.\n");
|
||||
return nvme_rdma_build_sgl_request(rqpair, rdma_req);
|
||||
}
|
||||
|
||||
if (length > req->payload_size) {
|
||||
length = req->payload_size;
|
||||
}
|
||||
|
||||
requested_size = length;
|
||||
mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)virt_addr,
|
||||
&requested_size);
|
||||
if (mr == NULL || requested_size < length) {
|
||||
for (i = 1; i < rdma_req->send_wr.num_sge; i++) {
|
||||
rdma_req->send_sgl[i].addr = 0;
|
||||
rdma_req->send_sgl[i].length = 0;
|
||||
rdma_req->send_sgl[i].lkey = 0;
|
||||
}
|
||||
|
||||
if (mr) {
|
||||
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
rdma_req->send_sgl[1].addr = (uint64_t)virt_addr;
|
||||
rdma_req->send_sgl[1].length = length;
|
||||
rdma_req->send_sgl[1].lkey = mr->lkey;
|
||||
|
||||
rdma_req->send_wr.num_sge = 2;
|
||||
|
||||
/* The first element of this SGL is pointing at an
|
||||
* spdk_nvmf_cmd object. For this particular command,
|
||||
* we only need the first 64 bytes corresponding to
|
||||
|
@@ -429,6 +429,8 @@ struct spdk_nvmf_rdma_device {
 	struct spdk_mem_map *map;
 	struct ibv_pd *pd;
 
+	int num_srq;
+
 	TAILQ_ENTRY(spdk_nvmf_rdma_device) link;
 };
 
@@ -2041,12 +2043,13 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH 128
 #define SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH 128
 #define SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH 4096
-#define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 64
+#define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
 #define SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
 #define SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE 131072
 #define SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE (SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE / SPDK_NVMF_MAX_SGL_ENTRIES)
 #define SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS 4096
 #define SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE 32
+#define SPDK_NVMF_RDMA_DEFAULT_NO_SRQ false;
 
 static void
 spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
@@ -2060,6 +2063,7 @@ spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
 	opts->num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS;
 	opts->buf_cache_size = SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE;
 	opts->max_srq_depth = SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
+	opts->no_srq = SPDK_NVMF_RDMA_DEFAULT_NO_SRQ
 }
 
 const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
@@ -2103,7 +2107,7 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		     " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
 		     " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
 		     " in_capsule_data_size=%d, max_aq_depth=%d,\n"
-		     " num_shared_buffers=%d, max_srq_depth=%d\n",
+		     " num_shared_buffers=%d, max_srq_depth=%d, no_srq=%d\n",
 		     opts->max_queue_depth,
 		     opts->max_io_size,
 		     opts->max_qpairs_per_ctrlr,
@@ -2111,7 +2115,8 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		     opts->in_capsule_data_size,
 		     opts->max_aq_depth,
 		     opts->num_shared_buffers,
-		     opts->max_srq_depth);
+		     opts->max_srq_depth,
+		     opts->no_srq);
 
 	/* I/O unit size cannot be larger than max I/O size */
 	if (opts->io_unit_size > opts->max_io_size) {
@@ -2940,9 +2945,10 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 	TAILQ_INIT(&poller->qpairs);
 
 	TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
-	if (device->attr.max_srq != 0) {
+	if (transport->opts.no_srq == false && device->num_srq < device->attr.max_srq) {
 		poller->max_srq_depth = transport->opts.max_srq_depth;
 
+		device->num_srq++;
 		memset(&srq_init_attr, 0, sizeof(struct ibv_srq_init_attr));
 		srq_init_attr.attr.max_wr = poller->max_srq_depth;
 		srq_init_attr.attr.max_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
@@ -2117,7 +2117,7 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 		i++;
 	}
 
-	assert(tcp_req->req.iovcnt < SPDK_NVMF_MAX_SGL_ENTRIES);
+	assert(tcp_req->req.iovcnt <= SPDK_NVMF_MAX_SGL_ENTRIES);
 	tcp_req->data_from_pool = true;
 
 	return 0;
@@ -2784,7 +2784,7 @@ spdk_nvmf_tcp_qpair_set_sq_size(struct spdk_nvmf_qpair *qpair)
 
 #define SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH 128
 #define SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH 128
-#define SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR 64
+#define SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
 #define SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
 #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE 131072
 #define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072
@ -319,6 +319,7 @@ spdk_rpc_get_version(struct spdk_jsonrpc_request *request, const struct spdk_jso
|
||||
if (params != NULL) {
|
||||
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
|
||||
"get_spdk_version method requires no parameters");
|
||||
return;
|
||||
}
|
||||
|
||||
w = spdk_jsonrpc_begin_result(request);
|
||||
|
@ -506,6 +506,7 @@ spdk_posix_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct
|
||||
#if defined(__linux__)
|
||||
struct epoll_event event;
|
||||
|
||||
memset(&event, 0, sizeof(event));
|
||||
event.events = EPOLLIN;
|
||||
event.data.ptr = sock;
|
||||
|
||||
|
@ -557,6 +557,7 @@ spdk_vpp_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct sp
|
||||
assert(group != NULL);
|
||||
assert(g_vpp_initialized);
|
||||
|
||||
memset(&event, 0, sizeof(event));
|
||||
event.events = EPOLLIN;
|
||||
event.data.ptr = sock;
|
||||
|
||||
|
@ -780,7 +780,6 @@ spdk_for_each_thread(spdk_msg_fn fn, void *ctx, spdk_msg_fn cpl)
|
||||
ct->ctx = ctx;
|
||||
ct->cpl = cpl;
|
||||
|
||||
pthread_mutex_lock(&g_devlist_mutex);
|
||||
thread = _get_thread();
|
||||
if (!thread) {
|
||||
SPDK_ERRLOG("No thread allocated\n");
|
||||
@ -789,6 +788,8 @@ spdk_for_each_thread(spdk_msg_fn fn, void *ctx, spdk_msg_fn cpl)
|
||||
return;
|
||||
}
|
||||
ct->orig_thread = thread;
|
||||
|
||||
pthread_mutex_lock(&g_devlist_mutex);
|
||||
ct->cur_thread = TAILQ_FIRST(&g_threads);
|
||||
pthread_mutex_unlock(&g_devlist_mutex);
|
||||
|
||||
|
@ -220,7 +220,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
|
||||
return;
|
||||
}
|
||||
|
||||
vid = vhost_new_device(vsocket->features);
|
||||
vid = vhost_new_device(vsocket->features, vsocket->notify_ops);
|
||||
if (vid == -1) {
|
||||
goto err;
|
||||
}
|
||||
|
@ -181,7 +181,7 @@ reset_device(struct virtio_net *dev)
|
||||
* there is a new virtio device being attached).
|
||||
*/
|
||||
int
|
||||
vhost_new_device(uint64_t features)
|
||||
vhost_new_device(uint64_t features, struct vhost_device_ops const *ops)
|
||||
{
|
||||
struct virtio_net *dev;
|
||||
int i;
|
||||
@ -207,6 +207,7 @@ vhost_new_device(uint64_t features)
|
||||
vhost_devices[i] = dev;
|
||||
dev->vid = i;
|
||||
dev->features = features;
|
||||
dev->notify_ops = ops;
|
||||
|
||||
return i;
|
||||
}
|
||||
|
@ -167,6 +167,13 @@ struct guest_page {
|
||||
uint64_t size;
|
||||
};
|
||||
|
||||
/* struct ether_addr was renamed to struct rte_ether_addr at one point */
|
||||
#ifdef RTE_ETHER_ADDR_LEN
|
||||
struct ether_addr {
|
||||
uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
|
||||
} __attribute__((__packed__));
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Device structure contains all configuration information relating
|
||||
* to the device.
|
||||
@ -301,7 +308,7 @@ gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
|
||||
|
||||
struct virtio_net *get_device(int vid);
|
||||
|
||||
int vhost_new_device(uint64_t features);
|
||||
int vhost_new_device(uint64_t features, struct vhost_device_ops const *ops);
|
||||
void cleanup_device(struct virtio_net *dev, int destroy);
|
||||
void reset_device(struct virtio_net *dev);
|
||||
void vhost_destroy_device(int);
|
||||
|
@ -1217,16 +1217,6 @@ vhost_user_msg_handler(int vid, int fd)
|
||||
if (dev == NULL)
|
||||
return -1;
|
||||
|
||||
if (!dev->notify_ops) {
|
||||
dev->notify_ops = vhost_driver_callback_get(dev->ifname);
|
||||
if (!dev->notify_ops) {
|
||||
RTE_LOG(ERR, VHOST_CONFIG,
|
||||
"failed to get callback ops for driver %s\n",
|
||||
dev->ifname);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
ret = read_vhost_message(fd, &msg);
|
||||
if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
|
||||
if (ret < 0)
|
||||
|
@ -83,6 +83,7 @@ COMMON_CFLAGS += -march=native
|
||||
endif
|
||||
ifeq ($(TARGET_MACHINE),aarch64)
|
||||
COMMON_CFLAGS += -march=armv8-a+crc
|
||||
COMMON_CFLAGS += -DPAGE_SIZE=$(shell getconf PAGESIZE)
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_WERROR), y)
|
||||
@ -265,7 +266,7 @@ SO_SUFFIX_ALL := $(SO_VER).$(SO_MINOR)
|
||||
# Provide function to ease build of a shared lib
|
||||
define spdk_build_realname_shared_lib
|
||||
$(CC) -o $@ -shared $(CPPFLAGS) $(LDFLAGS) \
|
||||
-Wl,--soname,$(patsubst %.so.$(SO_SUFFIX_ALL),%.so.$(SO_VER),$(notdir $@)) \
|
||||
-Wl,--soname,$(patsubst %.so.$(SO_SUFFIX_ALL),%.so.$(SO_SUFFIX_ALL),$(notdir $@)) \
|
||||
-Wl,--whole-archive $(1) -Wl,--no-whole-archive \
|
||||
-Wl,--version-script=$(2) \
|
||||
$(3)
|
||||
|
ocf (submodule)
@@ -1 +1 @@
-Subproject commit bd19b9c12f924b3bfd5d228c3dc4a16d807595d0
+Subproject commit 515137f25ec71dca0c268fbd1437dd7d177e4f8d
@@ -2,12 +2,12 @@
 %bcond_with doc
 
 Name: spdk
-Version: 19.04
+Version: 19.04.x
 Release: 0%{?dist}
 Epoch: 0
 URL: http://spdk.io
 
-Source: https://github.com/spdk/spdk/archive/v19.04.tar.gz
+Source: https://github.com/spdk/spdk/archive/v19.04.x.tar.gz
 Summary: Set of libraries and utilities for high performance user-mode storage
 
 %define package_version %{epoch}:%{version}-%{release}
@@ -61,3 +61,18 @@ Fio job parameters.
 - run_time: time (in seconds) to run workload
 - ramp_time: time (in seconds) to run workload before statistics are gathered
 - run_num: how many times to run given workload in loop
+
+# Running Test
+Before running the test script, use the setup.sh script to bind the devices you want to
+use in the test to the VFIO/UIO driver.
+Run the script on the NVMe-oF target system:
+
+    cd spdk
+    sudo PYTHONPATH=$PYTHONPATH:$PWD/scripts scripts/perf/nvmf/run_nvmf.py
+
+The script uses another SPDK script (scripts/rpc.py), so we pass the path to rpc.py by setting the Python path
+as a runtime environment parameter.
+
+# Test Results
+When the test completes, you will find a csv file (nvmf_results.csv) containing the results in the target node
+directory /tmp/results.
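Since the results land in a plain CSV file, they can be inspected with standard tooling. A minimal sketch that only assumes the /tmp/results/nvmf_results.csv path mentioned above; the column layout is whatever the test wrote, so rows are printed as-is:

    #!/usr/bin/env python3
    # Print the rows of the CSV produced by run_nvmf.py.
    # Path taken from the README text above; columns are not assumed.
    import csv

    with open("/tmp/results/nvmf_results.csv") as fh:
        for row in csv.reader(fh):
            print(", ".join(row))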
@@ -5,7 +5,7 @@
         "transport": "transport_type"
     },
     "target": {
-        "rdma_ips": ["192.0.1.1", "192.0.2.1"],
+        "nic_ips": ["192.0.1.1", "192.0.2.1"],
         "mode": "spdk",
         "use_null_block": false,
         "nvmet_dir": "/path/to/nvmetcli",
@@ -14,19 +14,19 @@
     },
     "initiator1": {
         "ip": "10.0.0.1",
-        "rdma_ips": ["192.0.1.1"],
+        "nic_ips": ["192.0.1.1"],
         "mode": "spdk",
         "nvmecli_dir": "/path/to/nvmecli"
     },
     "initiator2": {
         "ip": "10.0.0.2",
-        "rdma_ips": ["192.0.2.1"],
+        "nic_ips": ["192.0.2.1"],
         "mode": "spdk"
     },
     "fio": {
         "bs": ["4k"],
         "qd": [128],
-        "rw": ["read"],
+        "rw": ["randrw"],
        "rwmixread": 100,
         "run_time": 5,
         "ramp_time": 1,
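For reference, the renamed nic_ips key is read per node. A small sketch of how a consumer could pull the addresses out of this configuration; the config.json file name is an assumption, while the target/initiator*/nic_ips key names come from the example config above:

    #!/usr/bin/env python3
    # Collect nic_ips lists from the test configuration shown above.
    # Assumes the JSON is saved as config.json in the current directory.
    import json

    with open("config.json") as fh:
        cfg = json.load(fh)

    target_ips = cfg["target"]["nic_ips"]
    initiator_ips = {name: node["nic_ips"]
                     for name, node in cfg.items()
                     if name.startswith("initiator")}
    print(target_ips, initiator_ips)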
@@ -159,6 +159,7 @@ class Target(Server):
         for row in rows:
             with open(os.path.join(results_dir, csv_file), "a") as fh:
                 fh.write(row + "\n")
+        self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
 
     def measure_sar(self, results_dir, sar_file_name):
         self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
@@ -186,7 +187,7 @@ class Initiator(Server):
         self.nvmecli_bin = "nvme"  # Use system-wide nvme-cli
 
         self.ssh_connection = paramiko.SSHClient()
-        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy)
+        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
         self.remote_call("sudo rm -rf %s/nvmf_perf" % self.spdk_dir)
         self.remote_call("mkdir -p %s" % self.spdk_dir)
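The AutoAddPolicy change above fixes a subtle paramiko usage bug: the missing-host-key policy is now passed as an instance (note the parentheses); depending on the paramiko version, passing the bare class can fail when an unknown host key is encountered. A standalone sketch of the corrected pattern, with host and credentials as placeholders only:

    #!/usr/bin/env python3
    # Corrected paramiko pattern: pass an AutoAddPolicy *instance*.
    # Host, username and password are placeholders, not values from the test.
    import paramiko

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect("10.0.0.1", username="user", password="password")
    stdin, stdout, stderr = ssh.exec_command("uname -r")
    print(stdout.read().decode().strip())
    ssh.close()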
@@ -92,6 +92,8 @@ elif [ -f /etc/debian_version ]; then
 		"Note: Some SPDK CLI dependencies could not be installed."
 	# Additional dependencies for ISA-L used in compression
 	apt-get install -y autoconf automake libtool help2man
+	# Additional dependencies for the nvmf performance test script
+	apt-get install -y python3-paramiko
 elif [ -f /etc/SuSE-release ] || [ -f /etc/SUSE-brand ]; then
 	zypper install -y gcc gcc-c++ make cunit-devel libaio-devel libopenssl-devel \
 		git-core lcov python-base python-pycodestyle libuuid-devel sg3_utils pciutils
@@ -6,6 +6,7 @@ import logging
 import argparse
 import rpc
 import sys
+import shlex
 
 try:
     from shlex import quote
@@ -182,7 +183,7 @@ if __name__ == "__main__":
     p = subparsers.add_parser('construct_ocf_bdev',
                               help='Add an OCF block device')
     p.add_argument('name', help='Name of resulting OCF bdev')
-    p.add_argument('mode', help='OCF cache mode', choices=['wt', 'pt'])
+    p.add_argument('mode', help='OCF cache mode', choices=['wb', 'wt', 'pt'])
     p.add_argument('cache_bdev_name', help='Name of underlying cache bdev')
     p.add_argument('core_bdev_name', help='Name of unerlying core bdev')
     p.set_defaults(func=construct_ocf_bdev)
@@ -1403,7 +1404,8 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
             max_aq_depth=args.max_aq_depth,
             num_shared_buffers=args.num_shared_buffers,
             buf_cache_size=args.buf_cache_size,
-            max_srq_depth=args.max_srq_depth)
+            max_srq_depth=args.max_srq_depth,
+            no_srq=args.no_srq)
 
     p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
     p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
@@ -1416,6 +1418,7 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
     p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
     p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
     p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)
+    p.add_argument('-r', '--no-srq', action='store_true', help='Disable per-thread shared receive queue. Relevant only for RDMA transport')
     p.set_defaults(func=nvmf_create_transport)
 
     def get_nvmf_transports(args):
@@ -1792,13 +1795,27 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
     p.add_argument('-n', '--max', help="""Maximum number of notifications to return in response""", type=int)
     p.set_defaults(func=get_notifications)
 
-    args = parser.parse_args()
-
-    with rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper())) as client:
+    def call_rpc_func(args):
         try:
-            args.client = client
             args.func(args)
         except JSONRPCException as ex:
             print("Exception:")
             print(ex.message)
             exit(1)
+
+    def execute_script(parser, client, fd):
+        for rpc_call in map(str.rstrip, fd):
+            args = parser.parse_args(shlex.split(rpc_call))
+            args.client = client
+            call_rpc_func(args)
+
+    args = parser.parse_args()
+    args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))
+    if hasattr(args, 'func'):
+        call_rpc_func(args)
+    elif sys.stdin.isatty():
+        # No arguments and no data piped through stdin
+        parser.print_help()
+        exit(1)
+    else:
+        execute_script(parser, args.client, sys.stdin)
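With the change above, rpc.py can also consume a newline-separated list of RPC commands on stdin, which is what the reworked raid test below relies on. A small sketch of driving that mode from Python; the two malloc commands mirror the ones used in the raid test, get_bdevs is only an illustrative follow-up, and the script must be run from the SPDK repository root so scripts/rpc.py resolves:

    #!/usr/bin/env python3
    # Feed a batch of RPC commands to scripts/rpc.py through stdin;
    # the script executes them line by line against the running target.
    import subprocess

    cmds = "\n".join([
        "construct_malloc_bdev 32 512 -b Base_1",
        "construct_malloc_bdev 32 512 -b Base_2",
        "get_bdevs",
    ])

    subprocess.run(["scripts/rpc.py"], input=cmds + "\n",
                   text=True, check=True)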
@@ -73,7 +73,7 @@ def construct_ocf_bdev(client, name, mode, cache_bdev_name, core_bdev_name):
 
     Args:
         name: name of constructed OCF bdev
-        mode: OCF cache mode: {'wt', 'pt'}
+        mode: OCF cache mode: {'wb', 'wt', 'pt'}
         cache_bdev_name: name of underlying cache bdev
         core_bdev_name: name of underlying core bdev
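With 'wb' accepted as a cache mode, a write-back OCF bdev can also be created over JSON-RPC. A minimal sketch using the client and helper shown in this patch; it assumes the scripts/ directory is on PYTHONPATH, that the target listens on the default /var/tmp/spdk.sock socket, and the bdev names are placeholders:

    #!/usr/bin/env python3
    # Create a write-back ('wb') OCF bdev through the rpc helper module.
    # PYTHONPATH must include the SPDK scripts/ directory (see the README above).
    import rpc

    client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock", None, 60.0)
    rpc.bdev.construct_ocf_bdev(client,
                                name="MyCache",        # placeholder names
                                mode="wb",
                                cache_bdev_name="Malloc0",
                                core_bdev_name="Malloc1")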
@@ -45,7 +45,8 @@ def nvmf_create_transport(client,
                           max_aq_depth=None,
                           num_shared_buffers=None,
                           buf_cache_size=None,
-                          max_srq_depth=None):
+                          max_srq_depth=None,
+                          no_srq=False):
     """NVMf Transport Create options.
 
     Args:
@@ -57,8 +58,9 @@
         io_unit_size: I/O unit size in bytes (optional)
         max_aq_depth: Max size admin quque per controller (optional)
         num_shared_buffers: The number of pooled data buffers available to the transport (optional)
-        buf_cache_size: The number of shared buffers to reserve for each poll group(optional)
-        max_srq_depth: Max number of outstanding I/O per shared receive queue (optional)
+        buf_cache_size: The number of shared buffers to reserve for each poll group (optional)
+        max_srq_depth: Max number of outstanding I/O per shared receive queue - RDMA specific (optional)
+        no_srq: Boolean flag to disable SRQ even for devices that support it - RDMA specific (optional)
 
     Returns:
         True or False
@@ -84,6 +86,8 @@
     if buf_cache_size:
         params['buf_cache_size'] = buf_cache_size
     if max_srq_depth:
         params['max_srq_depth'] = max_srq_depth
+    if no_srq:
+        params['no_srq'] = no_srq
     return client.call('nvmf_create_transport', params)
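A matching sketch for the new no_srq flag: creating an RDMA transport with the shared receive queue disabled through the helper above. Same assumptions as the previous sketch (scripts/ on PYTHONPATH, default /var/tmp/spdk.sock socket); the explicit 128 queue-pair value only restates the new default for illustration:

    #!/usr/bin/env python3
    # Create an RDMA transport with SRQ disabled via the JSON-RPC helper.
    import rpc

    client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock", None, 60.0)
    rpc.nvmf.nvmf_create_transport(client,
                                   trtype="RDMA",
                                   max_qpairs_per_ctrlr=128,
                                   no_srq=True)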
@ -4,7 +4,8 @@ set -e
|
||||
|
||||
testdir=$(readlink -f $(dirname $0))
|
||||
rootdir=$(readlink -f $testdir/../..)
|
||||
rpc_py="$rootdir/scripts/rpc.py"
|
||||
rpc_server=/var/tmp/spdk-raid.sock
|
||||
rpc_py="$rootdir/scripts/rpc.py -s $rpc_server"
|
||||
tmp_file=/tmp/raidrandtest
|
||||
|
||||
source $rootdir/test/common/autotest_common.sh
|
||||
@ -54,30 +55,35 @@ function on_error_exit() {
|
||||
killprocess $raid_pid
|
||||
fi
|
||||
|
||||
rm -f $testdir/bdev.conf
|
||||
rm -f $tmp_file
|
||||
print_backtrace
|
||||
exit 1
|
||||
}
|
||||
|
||||
function configure_raid_bdev() {
|
||||
rm -rf $testdir/rpcs.txt
|
||||
|
||||
echo construct_malloc_bdev 32 512 -b Base_1 >> $testdir/rpcs.txt
|
||||
echo construct_malloc_bdev 32 512 -b Base_2 >> $testdir/rpcs.txt
|
||||
echo construct_raid_bdev -z 64 -r 0 -b \"Base_1 Base_2\" -n raid0 >> $testdir/rpcs.txt
|
||||
$rpc_py < $testdir/rpcs.txt
|
||||
|
||||
rm -rf $testdir/rpcs.txt
|
||||
}
|
||||
|
||||
function raid_function_test() {
|
||||
if [ $(uname -s) = Linux ] && modprobe -n nbd; then
|
||||
local rpc_server=/var/tmp/spdk-raid.sock
|
||||
local conf=$1
|
||||
local nbd=/dev/nbd0
|
||||
local raid_bdev
|
||||
|
||||
if [ ! -e $conf ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
modprobe nbd
|
||||
$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -c ${conf} -L bdev_raid &
|
||||
$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -L bdev_raid &
|
||||
raid_pid=$!
|
||||
echo "Process raid pid: $raid_pid"
|
||||
waitforlisten $raid_pid $rpc_server
|
||||
|
||||
raid_bdev=$($rootdir/scripts/rpc.py -s $rpc_server get_raid_bdevs online | cut -d ' ' -f 1)
|
||||
configure_raid_bdev
|
||||
raid_bdev=$($rpc_py get_raid_bdevs online | cut -d ' ' -f 1)
|
||||
if [ $raid_bdev = "" ]; then
|
||||
echo "No raid0 device in SPDK app"
|
||||
return 1
|
||||
@ -106,10 +112,8 @@ function raid_function_test() {
|
||||
timing_enter bdev_raid
|
||||
trap 'on_error_exit;' ERR
|
||||
|
||||
cp $testdir/bdev.conf.in $testdir/bdev.conf
|
||||
raid_function_test $testdir/bdev.conf
|
||||
raid_function_test
|
||||
|
||||
rm -f $testdir/bdev.conf
|
||||
rm -f $tmp_file
|
||||
report_test_completion "bdev_raid"
|
||||
timing_exit bdev_raid
|
||||
|
@ -824,7 +824,12 @@ blockdev_test_reset(void)
|
||||
target = g_io_targets;
|
||||
while (target != NULL) {
|
||||
blockdev_reset(target);
|
||||
CU_ASSERT_EQUAL(g_completion_success, true);
|
||||
/* Workaround: NVMe-oF target doesn't support reset yet - so for now
|
||||
* don't fail the test if it's an NVMe bdev.
|
||||
*/
|
||||
if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
|
||||
CU_ASSERT_EQUAL(g_completion_success, true);
|
||||
}
|
||||
|
||||
target = target->next;
|
||||
}
|
||||
|
test/blobfs/rocksdb/lsan_suppressions.txt (new file, 1 line)
@@ -0,0 +1 @@
+leak:spdk_fs_alloc_thread_ctx

@@ -13,6 +13,11 @@ run_step() {
 	echo "--spdk_cache_size=$CACHE_SIZE" >> "$1"_flags.txt
 
 	echo -n Start $1 test phase...
+	# ASAN has some bugs around thread_local variables. We have a destructor in place
+	# to free the thread contexts, but ASAN complains about the leak before those
+	# destructors have a chance to run. So suppress this one specific leak using
+	# LSAN_OPTIONS.
+	export LSAN_OPTIONS="suppressions=$testdir/lsan_suppressions.txt"
 	/usr/bin/time taskset 0xFF $DB_BENCH --flagfile="$1"_flags.txt &> "$1"_db_bench.txt
 	echo done.
 }
@@ -73,6 +73,11 @@ else
 	export DEPENDENCY_DIR
 fi
 
+if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
+	# blockdev.sh in SPDK 19.04 is not able to run with ASAN
+	export SPDK_RUN_ASAN=0
+fi
+
 if [ ! -z "$HUGEMEM" ]; then
 	export HUGEMEM
 fi
@@ -56,11 +56,11 @@ function tgt_check_notifications() {
 		# remove ID
 		event="${event_line%:*}"
 
-		ev_type=${event%*:}
-		ev_ctx=${event#:*}
+		ev_type=${event%:*}
+		ev_ctx=${event#*:}
 
-		ex_ev_type=${1%*:}
-		ex_ev_ctx=${1#:*}
+		ex_ev_type=${1%%:*}
+		ex_ev_ctx=${1#*:}
 
 		last_event_id=${event_line##*:}
@@ -401,8 +401,21 @@ function json_config_clear() {
 	# Check if config is clean.
 	# Global params can't be cleared so need to filter them out.
 	local config_filter="$rootdir/test/json_config/config_filter.py"
-	$rootdir/scripts/rpc.py -s "${app_socket[$1]}" save_config | \
-		$config_filter -method delete_global_parameters | $config_filter -method check_empty
+
+	# RPCs used to clean up the configuration (e.g. to delete split and nvme bdevs)
+	# complete immediately and do not wait for the unregister callback.
+	# Because of that, the configuration may not be fully cleaned at this point and
+	# we should wait a while. (See github issue #789)
+	count=100
+	while [ $count -gt 0 ] ; do
+		$rootdir/scripts/rpc.py -s "${app_socket[$1]}" save_config | $config_filter -method delete_global_parameters | $config_filter -method check_empty && break
+		count=$(( $count -1 ))
+		sleep 0.1
+	done
+
+	if [ $count -eq 0 ] ; then
+		return 1
+	fi
 }
 
 on_error_exit() {
@ -1300,12 +1300,8 @@ class TestCases(object):
|
||||
fail_count += self.c.snapshot_lvol_bdev(clone_bdev['name'], snapshot_name2)
|
||||
snapshot_bdev2 = self.c.get_lvol_bdev_with_name(self.lvs_name + "/" + snapshot_name2)
|
||||
|
||||
# Try to destroy snapshots with clones and check if it fails
|
||||
# Try to destroy snapshot with 2 clones and check if it fails
|
||||
ret_value = self.c.destroy_lvol_bdev(snapshot_bdev['name'])
|
||||
if ret_value == 0:
|
||||
print("ERROR: Delete snapshot should fail but didn't")
|
||||
fail_count += 1
|
||||
ret_value = self.c.destroy_lvol_bdev(snapshot_bdev2['name'])
|
||||
if ret_value == 0:
|
||||
print("ERROR: Delete snapshot should fail but didn't")
|
||||
fail_count += 1
|
||||
@ -2078,6 +2074,9 @@ class TestCases(object):
|
||||
self.c.construct_aio_bdev(aio_bdev0, base_name_1M, 4096)
|
||||
self.c.construct_aio_bdev(aio_bdev1, base_name_32M, 4096)
|
||||
|
||||
# wait 1 second to allow time for lvolstore tasting
|
||||
sleep(1)
|
||||
|
||||
# Check if configuration was properly loaded after tasting
|
||||
# get all info all lvs and lvol bdevs, compare with previous info
|
||||
new_bdevs = sorted(self.c.get_lvol_bdevs(), key=lambda x: x["name"])
|
||||
@ -2704,12 +2703,6 @@ class TestCases(object):
|
||||
fail_count += self.c.snapshot_lvol_bdev(lvol_bdev['name'], snapshot_name)
|
||||
snapshot_bdev = self.c.get_lvol_bdev_with_name(self.lvs_name + "/" + snapshot_name)
|
||||
|
||||
# Try to destroy snapshot and check if it fails
|
||||
ret_value = self.c.destroy_lvol_bdev(snapshot_bdev['name'])
|
||||
if ret_value == 0:
|
||||
print("ERROR: Delete snapshot should fail but didn't")
|
||||
fail_count += 1
|
||||
|
||||
# Decouple parent lvol bdev
|
||||
fail_count += self.c.decouple_parent_lvol_bdev(lvol_bdev['name'])
|
||||
lvol_bdev = self.c.get_lvol_bdev_with_name(uuid_bdev0)
|
||||
@ -2807,12 +2800,6 @@ class TestCases(object):
|
||||
fail_count += self.run_fio_test(nbd_name, begin_fill * MEGABYTE,
|
||||
fill_range * MEGABYTE, "read", pattern[i])
|
||||
|
||||
# Delete snapshot and check if it fails
|
||||
ret_value = self.c.destroy_lvol_bdev(snapshot_bdev2['name'])
|
||||
if ret_value == 0:
|
||||
print("ERROR: Delete snapshot should fail but didn't")
|
||||
fail_count += 1
|
||||
|
||||
# Decouple parent
|
||||
fail_count += self.c.decouple_parent_lvol_bdev(lvol_bdev['name'])
|
||||
lvol_bdev = self.c.get_lvol_bdev_with_name(uuid_bdev0)
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include "spdk/nvme.h"
|
||||
#include "spdk/env.h"
|
||||
#include "spdk/string.h"
|
||||
#include "spdk/pci_ids.h"
|
||||
|
||||
struct ctrlr_entry {
|
||||
struct spdk_nvme_ctrlr *ctrlr;
|
||||
@ -81,6 +82,7 @@ static struct ctrlr_entry *g_controllers = NULL;
|
||||
static struct ns_entry *g_namespaces = NULL;
|
||||
static int g_num_namespaces = 0;
|
||||
static struct worker_thread *g_workers = NULL;
|
||||
static bool g_qemu_ssd_found = false;
|
||||
|
||||
static uint64_t g_tsc_rate;
|
||||
|
||||
@ -525,6 +527,22 @@ static void
|
||||
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
|
||||
struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
|
||||
{
|
||||
if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
|
||||
struct spdk_pci_device *dev = spdk_nvme_ctrlr_get_pci_device(ctrlr);
|
||||
|
||||
/* QEMU emulated SSDs can't handle this test, so we will skip
|
||||
* them. QEMU NVMe SSDs report themselves as VID == Intel. So we need
|
||||
* to check this specific 0x5845 device ID to know whether it's QEMU
|
||||
* or not.
|
||||
*/
|
||||
if (spdk_pci_device_get_vendor_id(dev) == SPDK_PCI_VID_INTEL &&
|
||||
spdk_pci_device_get_device_id(dev) == 0x5845) {
|
||||
g_qemu_ssd_found = true;
|
||||
printf("Skipping QEMU NVMe SSD at %s\n", trid->traddr);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
register_ctrlr(ctrlr);
|
||||
}
|
||||
|
||||
@ -656,7 +674,7 @@ int main(int argc, char **argv)
|
||||
|
||||
if (!g_controllers) {
|
||||
printf("No NVMe controller found, %s exiting\n", argv[0]);
|
||||
return 1;
|
||||
return g_qemu_ssd_found ? 0 : 1;
|
||||
}
|
||||
|
||||
task_pool = spdk_mempool_create("task_pool", TASK_POOL_NUM,
|
||||
|
@ -38,11 +38,9 @@ waitforlisten $nvmfpid
|
||||
$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
|
||||
timing_exit start_nvmf_tgt
|
||||
|
||||
bdevs="$bdevs $($rpc_py construct_malloc_bdev 64 512)"
|
||||
$rpc_py construct_malloc_bdev 64 512 -b Malloc1
|
||||
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
|
||||
for bdev in $bdevs; do
|
||||
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
|
||||
done
|
||||
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
|
||||
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
|
||||
|
||||
PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
|
||||
@ -62,24 +60,20 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
|
||||
$rpc_py construct_nvme_bdev -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }') -i $NVMF_FIRST_TARGET_IP
|
||||
ls_guid=$($rpc_py construct_lvol_store Nvme0n1 lvs_0)
|
||||
get_lvs_free_mb $ls_guid
|
||||
lb_guid=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_0 $free_mb)
|
||||
$rpc_py construct_lvol_bdev -l lvs_0 lbd_0 $free_mb
|
||||
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode2 -a -s SPDK00000000000001
|
||||
for bdev in $lb_guid; do
|
||||
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 $bdev
|
||||
done
|
||||
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 lvs_0/lbd_0
|
||||
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
|
||||
LD_PRELOAD=$PLUGIN_DIR/fio_plugin /usr/src/fio/fio $PLUGIN_DIR/example_config.fio --filename="trtype=RDMA adrfam=IPv4 \
|
||||
traddr=$NVMF_FIRST_TARGET_IP trsvcid=4420 ns=1"
|
||||
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
|
||||
|
||||
# Test fio_plugin as host with nvme lvol nested backend
|
||||
ls_nested_guid=$($rpc_py construct_lvol_store $lb_guid lvs_n_0)
|
||||
ls_nested_guid=$($rpc_py construct_lvol_store lvs_0/lbd_0 lvs_n_0)
|
||||
get_lvs_free_mb $ls_nested_guid
|
||||
lb_nested_guid=$($rpc_py construct_lvol_bdev -u $ls_nested_guid lbd_nest_0 $free_mb)
|
||||
$rpc_py construct_lvol_bdev -l lvs_n_0 lbd_nest_0 $free_mb
|
||||
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode3 -a -s SPDK00000000000001
|
||||
for bdev in $lb_nested_guid; do
|
||||
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode3 $bdev
|
||||
done
|
||||
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode3 lvs_n_0/lbd_nest_0
|
||||
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode3 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
|
||||
LD_PRELOAD=$PLUGIN_DIR/fio_plugin /usr/src/fio/fio $PLUGIN_DIR/example_config.fio --filename="trtype=RDMA adrfam=IPv4 \
|
||||
traddr=$NVMF_FIRST_TARGET_IP trsvcid=4420 ns=1"
|
||||
@ -87,9 +81,9 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
|
||||
|
||||
sync
|
||||
# Delete lvol_bdev and destroy lvol_store.
|
||||
$rpc_py destroy_lvol_bdev "$lb_nested_guid"
|
||||
$rpc_py destroy_lvol_bdev lvs_n_0/lbd_nest_0
|
||||
$rpc_py destroy_lvol_store -l lvs_n_0
|
||||
$rpc_py destroy_lvol_bdev "$lb_guid"
|
||||
$rpc_py destroy_lvol_bdev lvs_0/lbd_0
|
||||
$rpc_py destroy_lvol_store -l lvs_0
|
||||
$rpc_py delete_nvme_controller Nvme0
|
||||
fi
|
||||
|
@ -1,13 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
curdir=$(dirname $(readlink -f "$BASH_SOURCE"))
|
||||
rootdir=$(readlink -f $curdir/../../..)
|
||||
plugindir=$rootdir/examples/bdev/fio_plugin
|
||||
|
||||
source $rootdir/test/common/autotest_common.sh
|
||||
|
||||
function fio_verify(){
|
||||
LD_PRELOAD=$plugindir/fio_plugin /usr/src/fio/fio $curdir/test.fio --aux-path=/tmp/ --ioengine=spdk_bdev $@
|
||||
}
|
||||
|
||||
fio_verify --filename=MalCache1:MalCache2 --spdk_conf=$curdir/mallocs.conf
|
test/ocf/integrity/fio-modes.sh (new executable file, 38 lines)
@ -0,0 +1,38 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
curdir=$(dirname $(readlink -f "$BASH_SOURCE"))
|
||||
rootdir=$(readlink -f $curdir/../../..)
|
||||
plugindir=$rootdir/examples/bdev/fio_plugin
|
||||
|
||||
source $rootdir/test/common/autotest_common.sh
|
||||
|
||||
function fio_verify(){
|
||||
LD_PRELOAD=$plugindir/fio_plugin /usr/src/fio/fio $curdir/test.fio --aux-path=/tmp/ --ioengine=spdk_bdev $@
|
||||
}
|
||||
|
||||
function cleanup(){
|
||||
rm -f $curdir/modes.conf
|
||||
}
|
||||
|
||||
trap "cleanup; exit 1" SIGINT SIGTERM EXIT
|
||||
|
||||
nvme_cfg=$($rootdir/scripts/gen_nvme.sh)
|
||||
|
||||
config="
|
||||
$nvme_cfg
|
||||
|
||||
[Split]
|
||||
Split Nvme0n1 8 101
|
||||
|
||||
[OCF]
|
||||
OCF PT_Nvme pt Nvme0n1p0 Nvme0n1p1
|
||||
OCF WT_Nvme wt Nvme0n1p2 Nvme0n1p3
|
||||
OCF WB_Nvme0 wb Nvme0n1p4 Nvme0n1p5
|
||||
OCF WB_Nvme1 wb Nvme0n1p6 Nvme0n1p7
|
||||
"
|
||||
echo "$config" > $curdir/modes.conf
|
||||
|
||||
fio_verify --filename=PT_Nvme:WT_Nvme:WB_Nvme0:WB_Nvme1 --spdk_conf=$curdir/modes.conf
|
||||
|
||||
trap - SIGINT SIGTERM EXIT
|
||||
cleanup
|
@@ -2,7 +2,7 @@
 thread=1
 group_reporting=1
 direct=1
-norandommap=1
+serialize_overlap=1
 time_based=1
 do_verify=1
 verify=md5
@@ -7,16 +7,17 @@ source $rootdir/test/common/autotest_common.sh
 
 function suite()
 {
-	timing_enter "$@"
+	timing_enter $(basename $@)
 	run_test suite "$@"
-	timing_exit "$@"
+	timing_exit $(basename $@)
 }
 
 timing_enter ocf
 
 suite "$testdir/integrity/fio-mallocs.sh"
+suite "$testdir/integrity/fio-modes.sh"
 suite "$testdir/integrity/bdevperf-iotypes.sh"
 suite "$testdir/management/create-destruct.sh"
 suite "$testdir/management/multicore.sh"
 
 timing_exit ocf
 report_test_completion "ocf"
@ -225,11 +225,8 @@ bdev_ut_get_io_channel(void *ctx)
|
||||
return spdk_get_io_channel(&g_bdev_ut_io_device);
|
||||
}
|
||||
|
||||
static bool
|
||||
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
DEFINE_STUB(stub_io_type_supported, static bool, (void *_bdev, enum spdk_bdev_io_type io_type),
|
||||
true);
|
||||
|
||||
static struct spdk_bdev_fn_table fn_table = {
|
||||
.destruct = stub_destruct,
|
||||
@ -755,6 +752,45 @@ io_wait_cb(void *arg)
|
||||
entry->submitted = true;
|
||||
}
|
||||
|
||||
static void
|
||||
bdev_io_types_test(void)
|
||||
{
|
||||
struct spdk_bdev *bdev;
|
||||
struct spdk_bdev_desc *desc = NULL;
|
||||
struct spdk_io_channel *io_ch;
|
||||
struct spdk_bdev_opts bdev_opts = {
|
||||
.bdev_io_pool_size = 4,
|
||||
.bdev_io_cache_size = 2,
|
||||
};
|
||||
int rc;
|
||||
|
||||
rc = spdk_bdev_set_opts(&bdev_opts);
|
||||
CU_ASSERT(rc == 0);
|
||||
spdk_bdev_initialize(bdev_init_cb, NULL);
|
||||
poll_threads();
|
||||
|
||||
bdev = allocate_bdev("bdev0");
|
||||
|
||||
rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
|
||||
CU_ASSERT(rc == 0);
|
||||
poll_threads();
|
||||
SPDK_CU_ASSERT_FATAL(desc != NULL);
|
||||
io_ch = spdk_bdev_get_io_channel(desc);
|
||||
CU_ASSERT(io_ch != NULL);
|
||||
|
||||
/* WRITE and WRITE ZEROES are not supported */
|
||||
MOCK_SET(stub_io_type_supported, false);
|
||||
rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
|
||||
CU_ASSERT(rc == -ENOTSUP);
|
||||
MOCK_SET(stub_io_type_supported, true);
|
||||
|
||||
spdk_put_io_channel(io_ch);
|
||||
spdk_bdev_close(desc);
|
||||
free_bdev(bdev);
|
||||
spdk_bdev_finish(bdev_fini_cb, NULL);
|
||||
poll_threads();
|
||||
}
|
||||
|
||||
static void
|
||||
bdev_io_wait_test(void)
|
||||
{
|
||||
@ -1567,6 +1603,7 @@ main(int argc, char **argv)
|
||||
CU_add_test(suite, "open_write", open_write_test) == NULL ||
|
||||
CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
|
||||
CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
|
||||
CU_add_test(suite, "bdev_io_types", bdev_io_types_test) == NULL ||
|
||||
CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
|
||||
CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
|
||||
CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
|
||||
|
@@ -61,7 +61,6 @@ bool lvol_already_opened = false;
bool g_examine_done = false;
bool g_bdev_alias_already_exists = false;
bool g_lvs_with_name_already_exists = false;
bool g_lvol_deletable = true;

int
spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
@@ -446,7 +445,7 @@ spdk_lvol_close(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_ar
bool
spdk_lvol_deletable(struct spdk_lvol *lvol)
{
return g_lvol_deletable;
return true;
}

void
@@ -1034,13 +1033,6 @@ ut_lvol_destroy(void)
CU_ASSERT(g_lvolerrno == 0);
lvol2 = g_lvol;

/* Unsuccessful lvols destroy */
g_lvol_deletable = false;
vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
CU_ASSERT(g_lvol != NULL);
CU_ASSERT(g_lvserrno == -EPERM);

g_lvol_deletable = true;
/* Successful lvols destroy */
vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
CU_ASSERT(g_lvol == NULL);
@@ -45,6 +45,8 @@
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

extern bool g_delete_snapshot_enabled;

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob;
@ -2734,6 +2736,144 @@ bs_load(void)
|
||||
|
||||
}
|
||||
|
||||
static void
|
||||
bs_load_pending_removal(void)
|
||||
{
|
||||
struct spdk_blob_store *bs;
|
||||
struct spdk_bs_dev *dev;
|
||||
struct spdk_blob_opts opts;
|
||||
struct spdk_blob *blob, *snapshot;
|
||||
spdk_blob_id blobid, snapshotid;
|
||||
const void *value;
|
||||
size_t value_len;
|
||||
int rc;
|
||||
|
||||
dev = init_dev();
|
||||
|
||||
spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
|
||||
bs = g_bs;
|
||||
|
||||
/* Create blob */
|
||||
spdk_blob_opts_init(&opts);
|
||||
opts.num_clusters = 10;
|
||||
|
||||
spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
blobid = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
blob = g_blob;
|
||||
|
||||
/* Create snapshot */
|
||||
spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
snapshotid = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
snapshot = g_blob;
|
||||
|
||||
/* Set SNAPSHOT_PENDING_REMOVAL xattr */
|
||||
snapshot->md_ro = false;
|
||||
rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
|
||||
CU_ASSERT(rc == 0);
|
||||
snapshot->md_ro = true;
|
||||
|
||||
spdk_blob_close(snapshot, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_blob_close(blob, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
/* Reload blobstore */
|
||||
spdk_bs_unload(g_bs, bs_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
g_bs = NULL;
|
||||
|
||||
dev = init_dev();
|
||||
spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
|
||||
bs = g_bs;
|
||||
|
||||
/* Snapshot should not be removed as blob is still pointing to it */
|
||||
spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
snapshot = g_blob;
|
||||
|
||||
/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
|
||||
rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
|
||||
CU_ASSERT(rc != 0);
|
||||
|
||||
/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
|
||||
snapshot->md_ro = false;
|
||||
rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
|
||||
CU_ASSERT(rc == 0);
|
||||
snapshot->md_ro = true;
|
||||
|
||||
spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
blob = g_blob;
|
||||
|
||||
/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
|
||||
_spdk_blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
|
||||
|
||||
spdk_blob_sync_md(blob, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_blob_close(snapshot, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_blob_close(blob, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
/* Reload blobstore */
|
||||
spdk_bs_unload(g_bs, bs_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
g_bs = NULL;
|
||||
|
||||
dev = init_dev();
|
||||
spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
|
||||
bs = g_bs;
|
||||
|
||||
/* Snapshot should be removed as blob is not pointing to it anymore */
|
||||
spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno != 0);
|
||||
|
||||
spdk_bs_unload(g_bs, bs_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
g_bs = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
bs_load_custom_cluster_size(void)
|
||||
{
|
||||
@@ -5294,12 +5434,16 @@ _blob_inflate_rw(bool decouple_parent)
poll_threads();
CU_ASSERT(g_bserrno == 0);

/* Try to delete base snapshot (for decouple_parent should fail while
* dependency still exists) */
/* Try to delete base snapshot */
spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(decouple_parent || g_bserrno == 0);
CU_ASSERT(!decouple_parent || g_bserrno != 0);

if (g_delete_snapshot_enabled) {
CU_ASSERT(g_bserrno == 0);
} else {
CU_ASSERT(decouple_parent || g_bserrno == 0);
CU_ASSERT(!decouple_parent || g_bserrno != 0);
}

/* Reopen blob after snapshot deletion */
spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
@@ -5578,15 +5722,11 @@ blob_relations(void)
poll_threads();
CU_ASSERT(g_bserrno == 0);

/* Try to delete snapshot with created clones */
/* Try to delete snapshot with more than 1 clone */
spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno != 0);

spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno != 0);

spdk_bs_unload(bs, bs_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
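The paired asserts above encode boolean implication: `CU_ASSERT(decouple_parent || g_bserrno == 0)` reads as "if the parent was not decoupled, the delete must have succeeded", and the companion line covers the opposite case. A tiny stand-alone illustration of that pattern, using plain assert() instead of CUnit and purely illustrative variable values:

```c
#include <assert.h>
#include <stdbool.h>

/* assert_implies(p, q) fails only when p is true and q is false,
 * i.e. it checks the implication p -> q. */
static void
assert_implies(bool p, bool q)
{
	assert(!p || q);
}

int
main(void)
{
	bool decouple_parent = true;
	int bserrno = -16; /* pretend the delete failed while a dependency still exists */

	/* Same shape as the test: failure is expected only in the decouple_parent case. */
	assert_implies(!decouple_parent, bserrno == 0);
	assert_implies(decouple_parent, bserrno != 0);
	return 0;
}
```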
@ -5658,12 +5798,432 @@ blob_relations(void)
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 0);
|
||||
|
||||
/* Try to delete all blobs in the worse possible order */
|
||||
/* Try to delete blob that user should not be able to remove */
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno != 0);
|
||||
|
||||
/* Remove all blobs */
|
||||
|
||||
spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_unload(bs, bs_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
g_bs = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Snapshot-clones relation test 2
|
||||
*
|
||||
* snapshot1
|
||||
* |
|
||||
* snapshot2
|
||||
* |
|
||||
* +-----+-----+
|
||||
* | |
|
||||
* blob(ro) snapshot3
|
||||
* | |
|
||||
* | snapshot4
|
||||
* | | |
|
||||
* clone2 clone clone3
|
||||
*/
|
||||
static void
|
||||
blob_relations2(void)
|
||||
{
|
||||
struct spdk_blob_store *bs;
|
||||
struct spdk_bs_dev *dev;
|
||||
struct spdk_bs_opts bs_opts;
|
||||
struct spdk_blob_opts opts;
|
||||
struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
|
||||
spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
|
||||
cloneid3;
|
||||
int rc;
|
||||
size_t count;
|
||||
spdk_blob_id ids[10] = {};
|
||||
|
||||
if (!g_delete_snapshot_enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
dev = init_dev();
|
||||
spdk_bs_opts_init(&bs_opts);
|
||||
snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
|
||||
|
||||
spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
|
||||
bs = g_bs;
|
||||
|
||||
/* 1. Create blob with 10 clusters */
|
||||
|
||||
spdk_blob_opts_init(&opts);
|
||||
opts.num_clusters = 10;
|
||||
|
||||
spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
blobid = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
blob = g_blob;
|
||||
|
||||
/* 2. Create snapshot1 */
|
||||
|
||||
spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
snapshotid1 = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
snapshot1 = g_blob;
|
||||
|
||||
CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
|
||||
|
||||
CU_ASSERT(blob->parent_id == snapshotid1);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
|
||||
|
||||
/* Check if blob is the clone of snapshot1 */
|
||||
CU_ASSERT(blob->parent_id == snapshotid1);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == blobid);
|
||||
|
||||
/* 3. Create another snapshot */
|
||||
|
||||
spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
snapshotid2 = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
snapshot2 = g_blob;
|
||||
|
||||
CU_ASSERT(spdk_blob_is_clone(snapshot2));
|
||||
CU_ASSERT(snapshot2->parent_id == snapshotid1);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
|
||||
|
||||
/* Check if snapshot2 is the clone of snapshot1 and blob
|
||||
* is a child of snapshot2 */
|
||||
CU_ASSERT(blob->parent_id == snapshotid2);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == blobid);
|
||||
|
||||
/* 4. Create clone from snapshot */
|
||||
|
||||
spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
cloneid = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
clone = g_blob;
|
||||
|
||||
CU_ASSERT(clone->parent_id == snapshotid2);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
|
||||
|
||||
/* Check if clone is on the snapshot's list */
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 2);
|
||||
CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
|
||||
CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
|
||||
|
||||
/* 5. Create snapshot of the clone */
|
||||
|
||||
spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
snapshotid3 = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
snapshot3 = g_blob;
|
||||
|
||||
CU_ASSERT(snapshot3->parent_id == snapshotid2);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
|
||||
|
||||
/* Check if clone is converted to the clone of snapshot3 and snapshot3
|
||||
* is a child of snapshot2 */
|
||||
CU_ASSERT(clone->parent_id == snapshotid3);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == cloneid);
|
||||
|
||||
/* 6. Create another snapshot of the clone */
|
||||
|
||||
spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
snapshotid4 = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
snapshot4 = g_blob;
|
||||
|
||||
CU_ASSERT(snapshot4->parent_id == snapshotid3);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
|
||||
|
||||
/* Check if clone is converted to the clone of snapshot4 and snapshot4
|
||||
* is a child of snapshot3 */
|
||||
CU_ASSERT(clone->parent_id == snapshotid4);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == cloneid);
|
||||
|
||||
/* 7. Remove snapshot 4 */
|
||||
|
||||
spdk_blob_close(snapshot4, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid4, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
/* Check if relations are back to state from before creating snapshot 4 */
|
||||
CU_ASSERT(clone->parent_id == snapshotid3);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == cloneid);
|
||||
|
||||
/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
|
||||
|
||||
spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
cloneid3 = g_blobid;
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno != 0);
|
||||
|
||||
/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
|
||||
|
||||
spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
snapshot3 = g_blob;
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno != 0);
|
||||
|
||||
spdk_blob_close(snapshot3, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
/* 10. Remove snapshot 1 */
|
||||
|
||||
spdk_blob_close(snapshot1, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
/* Check if relations are back to state from before creating snapshot 4 (before step 6) */
|
||||
CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 2);
|
||||
CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
|
||||
CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
|
||||
|
||||
/* 11. Try to create clone from read only blob */
|
||||
|
||||
/* Mark blob as read only */
|
||||
spdk_blob_set_read_only(blob);
|
||||
spdk_blob_sync_md(blob, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
/* Create clone from read only blob */
|
||||
spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
|
||||
cloneid2 = g_blobid;
|
||||
|
||||
spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
|
||||
clone2 = g_blob;
|
||||
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, blobid, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == cloneid2);
|
||||
|
||||
/* Close blobs */
|
||||
|
||||
spdk_blob_close(clone2, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_blob_close(blob, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_blob_close(clone, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_blob_close(snapshot2, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_blob_close(snapshot3, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_unload(bs, bs_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
g_bs = NULL;
|
||||
|
||||
/* Load an existing blob store */
|
||||
dev = init_dev();
|
||||
snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
|
||||
|
||||
spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
|
||||
bs = g_bs;
|
||||
|
||||
/* Verify structure of loaded blob store */
|
||||
|
||||
/* snapshot2 */
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
|
||||
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 2);
|
||||
CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
|
||||
CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
|
||||
|
||||
/* blob */
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, blobid, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == cloneid2);
|
||||
|
||||
/* clone */
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 0);
|
||||
|
||||
/* snapshot3 */
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 1);
|
||||
CU_ASSERT(ids[0] == cloneid);
|
||||
|
||||
/* clone2 */
|
||||
CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
|
||||
count = SPDK_COUNTOF(ids);
|
||||
rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(count == 0);
|
||||
|
||||
/* Try to delete all blobs in the worse possible order */
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno != 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno == 0);
|
||||
|
||||
spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
|
||||
poll_threads();
|
||||
CU_ASSERT(g_bserrno != 0);
|
||||
@@ -5676,26 +6236,14 @@ blob_relations(void)
poll_threads();
CU_ASSERT(g_bserrno == 0);

spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno != 0);

spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno != 0);
CU_ASSERT(g_bserrno == 0);

spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);

spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);

spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);

spdk_bs_unload(bs, bs_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
@@ -6638,6 +7186,7 @@ int main(int argc, char **argv)
CU_add_test(suite, "blob_iter", blob_iter) == NULL ||
CU_add_test(suite, "blob_xattr", blob_xattr) == NULL ||
CU_add_test(suite, "bs_load", bs_load) == NULL ||
CU_add_test(suite, "bs_load_pending_removal", bs_load_pending_removal) == NULL ||
CU_add_test(suite, "bs_load_custom_cluster_size", bs_load_custom_cluster_size) == NULL ||
CU_add_test(suite, "bs_unload", bs_unload) == NULL ||
CU_add_test(suite, "bs_cluster_sz", bs_cluster_sz) == NULL ||
@@ -6661,6 +7210,7 @@ int main(int argc, char **argv)
CU_add_test(suite, "blob_snapshot_rw", blob_snapshot_rw) == NULL ||
CU_add_test(suite, "blob_snapshot_rw_iov", blob_snapshot_rw_iov) == NULL ||
CU_add_test(suite, "blob_relations", blob_relations) == NULL ||
CU_add_test(suite, "blob_relations2", blob_relations2) == NULL ||
CU_add_test(suite, "blob_inflate_rw", blob_inflate_rw) == NULL ||
CU_add_test(suite, "blob_snapshot_freeze_io", blob_snapshot_freeze_io) == NULL ||
CU_add_test(suite, "blob_operation_split_rw", blob_operation_split_rw) == NULL ||
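The blob_relations and blob_relations2 tests above lean on two query helpers throughout: spdk_blob_get_parent_snapshot() returns the parent snapshot id (or SPDK_BLOBID_INVALID when there is none), and spdk_blob_get_clones() takes the capacity of the id array in *count and, on success, returns the number of clones in the same variable. A minimal usage sketch built only from those calls as they appear in the tests, assuming the spdk/blob.h header from the SPDK tree; setup and error handling are omitted:

```c
#include <inttypes.h>
#include <stdio.h>
#include "spdk/blob.h"

/* Print a blob's parent snapshot and its direct clones, mirroring the
 * query pattern used by blob_relations()/blob_relations2(). */
static void
print_blob_relations(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	spdk_blob_id parent, ids[10];
	size_t count, i;
	int rc;

	parent = spdk_blob_get_parent_snapshot(bs, blobid);
	if (parent == SPDK_BLOBID_INVALID) {
		printf("blob %" PRIu64 " has no parent snapshot\n", blobid);
	} else {
		printf("blob %" PRIu64 " is a clone of %" PRIu64 "\n", blobid, parent);
	}

	/* On input *count is the capacity of ids[]; on return it holds the
	 * number of clones actually reported. */
	count = sizeof(ids) / sizeof(ids[0]);
	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
	if (rc == 0) {
		for (i = 0; i < count; i++) {
			printf("  clone: %" PRIu64 "\n", ids[i]);
		}
	}
}
```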
@@ -132,6 +132,24 @@ _fs_init(void *arg)
CU_ASSERT(g_fserrno == 0);
}

static void
_fs_load(void *arg)
{
struct spdk_thread *thread;
struct spdk_bs_dev *dev;

g_fs = NULL;
g_fserrno = -1;
dev = init_dev();
spdk_fs_load(dev, send_request, fs_op_with_handle_complete, NULL);
thread = spdk_get_thread();
while (spdk_thread_poll(thread, 0, 0) > 0) {}

SPDK_CU_ASSERT_FATAL(g_fs != NULL);
SPDK_CU_ASSERT_FATAL(g_fs->bdev == dev);
CU_ASSERT(g_fserrno == 0);
}

static void
_fs_unload(void *arg)
{
@@ -145,6 +163,11 @@ _fs_unload(void *arg)
g_fs = NULL;
}

static void
_nop(void *arg)
{
}

static void
cache_write(void)
{
@@ -184,6 +207,139 @@ cache_write(void)
ut_send_request(_fs_unload, NULL);
}

static void
file_length(void)
{
int rc;
char *buf;
uint64_t buf_length;
struct spdk_fs_thread_ctx *channel;
struct spdk_file_stat stat = {0};

ut_send_request(_fs_init, NULL);

channel = spdk_fs_alloc_thread_ctx(g_fs);

g_file = NULL;
rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_file != NULL);

/* Write one CACHE_BUFFER. Filling at least one cache buffer triggers
* a flush to disk.
*/
buf_length = CACHE_BUFFER_SIZE;
buf = calloc(1, buf_length);
spdk_file_write(g_file, channel, buf, 0, buf_length);
free(buf);

/* Spin until all of the data has been flushed to the SSD. There's been no
* sync operation yet, so the xattr on the file is still 0.
*/
while (g_file->length_flushed != buf_length) {}

/* Close the file. This causes an implicit sync which should write the
* length_flushed value as the "length" xattr on the file.
*/
spdk_file_close(g_file, channel);

rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
CU_ASSERT(rc == 0);
CU_ASSERT(buf_length == stat.size);

spdk_fs_free_thread_ctx(channel);

/* Unload and reload the filesystem. The file length will be
* read during load from the length xattr. We want to make sure
* it matches what was written when the file was originally
* written and closed.
*/
ut_send_request(_fs_unload, NULL);

ut_send_request(_fs_load, NULL);

channel = spdk_fs_alloc_thread_ctx(g_fs);

rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
CU_ASSERT(rc == 0);
CU_ASSERT(buf_length == stat.size);

g_file = NULL;
rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &g_file);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_file != NULL);

spdk_file_close(g_file, channel);

rc = spdk_fs_delete_file(g_fs, channel, "testfile");
CU_ASSERT(rc == 0);

spdk_fs_free_thread_ctx(channel);

ut_send_request(_fs_unload, NULL);
}

static void
partial_buffer(void)
{
int rc;
char *buf;
uint64_t buf_length;
struct spdk_fs_thread_ctx *channel;
struct spdk_file_stat stat = {0};

ut_send_request(_fs_init, NULL);

channel = spdk_fs_alloc_thread_ctx(g_fs);

g_file = NULL;
rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_file != NULL);

/* Write one CACHE_BUFFER plus one byte. Filling at least one cache buffer triggers
* a flush to disk. We want to make sure the extra byte is not implicitly flushed.
* It should only get flushed once we sync or close the file.
*/
buf_length = CACHE_BUFFER_SIZE + 1;
buf = calloc(1, buf_length);
spdk_file_write(g_file, channel, buf, 0, buf_length);
free(buf);

/* Send some nop messages to the dispatch thread. This will ensure any of the
* pending write operations are completed. A well-functioning blobfs should only
* issue one write for the filled CACHE_BUFFER - a buggy one might try to write
* the extra byte. So do a bunch of _nops to make sure all of them (even the buggy
* ones) get a chance to run. Note that we can't just send a message to the
* dispatch thread to call spdk_thread_poll() because the messages are themselves
* run in the context of spdk_thread_poll().
*/
ut_send_request(_nop, NULL);
ut_send_request(_nop, NULL);
ut_send_request(_nop, NULL);
ut_send_request(_nop, NULL);
ut_send_request(_nop, NULL);
ut_send_request(_nop, NULL);

CU_ASSERT(g_file->length_flushed == CACHE_BUFFER_SIZE);

/* Close the file. This causes an implicit sync which should write the
* length_flushed value as the "length" xattr on the file.
*/
spdk_file_close(g_file, channel);

rc = spdk_fs_file_stat(g_fs, channel, "testfile", &stat);
CU_ASSERT(rc == 0);
CU_ASSERT(buf_length == stat.size);

rc = spdk_fs_delete_file(g_fs, channel, "testfile");
CU_ASSERT(rc == 0);

spdk_fs_free_thread_ctx(channel);

ut_send_request(_fs_unload, NULL);
}

static void
cache_write_null_buffer(void)
{
@@ -369,6 +525,8 @@ int main(int argc, char **argv)

if (
CU_add_test(suite, "write", cache_write) == NULL ||
CU_add_test(suite, "file length", file_length) == NULL ||
CU_add_test(suite, "partial buffer", partial_buffer) == NULL ||
CU_add_test(suite, "write_null_buffer", cache_write_null_buffer) == NULL ||
CU_add_test(suite, "create_sync", fs_create_sync) == NULL ||
CU_add_test(suite, "append_no_cache", cache_append_no_cache) == NULL ||
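file_length() and partial_buffer() pin down the flush rule the comments describe: before a sync or close, blobfs flushes data only in whole CACHE_BUFFER_SIZE units, and a trailing partial buffer stays in cache until the implicit sync on close. A stand-alone sketch of that arithmetic; the CACHE_BUFFER_SIZE value here is illustrative only, the real constant comes from the blobfs code:

```c
#include <assert.h>
#include <stdint.h>

#define CACHE_BUFFER_SIZE_SKETCH (256 * 1024) /* illustrative value only */

/* Bytes blobfs is expected to have flushed before any sync/close,
 * per the behaviour exercised by file_length() and partial_buffer(). */
static uint64_t
expected_length_flushed(uint64_t bytes_written)
{
	return (bytes_written / CACHE_BUFFER_SIZE_SKETCH) * CACHE_BUFFER_SIZE_SKETCH;
}

int
main(void)
{
	/* file_length(): exactly one full buffer -> everything is flushed. */
	assert(expected_length_flushed(CACHE_BUFFER_SIZE_SKETCH) == CACHE_BUFFER_SIZE_SKETCH);

	/* partial_buffer(): one buffer plus a byte -> the extra byte waits for close. */
	assert(expected_length_flushed(CACHE_BUFFER_SIZE_SKETCH + 1) == CACHE_BUFFER_SIZE_SKETCH);
	return 0;
}
```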
@@ -74,7 +74,7 @@ log_test(void)
CU_ASSERT(spdk_log_get_flag("log") == false);
#endif

spdk_log_open();
spdk_log_open(NULL);
spdk_log_set_flag("log");
SPDK_WARNLOG("log warning unit test\n");
SPDK_DEBUGLOG(SPDK_LOG_LOG, "log test\n");
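The log hunk above shows spdk_log_open() gaining an argument; the updated test passes NULL, which presumably keeps the library's built-in log output rather than installing a caller-supplied handler. A minimal, hedged usage sketch that matches the call as it appears in the test (spdk_log() is used instead of the logging macros so the snippet only depends on spdk/log.h):

```c
#include "spdk/log.h"

int
main(void)
{
	/* NULL selects the default logging backend, as in the updated unit test. */
	spdk_log_open(NULL);
	spdk_log(SPDK_LOG_WARN, __FILE__, __LINE__, __func__, "log warning unit test\n");
	spdk_log_close();
	return 0;
}
```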
@@ -9,10 +9,6 @@ PLUGIN_DIR=$ROOT_DIR/examples/bdev/fio_plugin
FIO_PATH="/usr/src/fio"
virtio_bdevs=""
virtio_with_unmap=""
os_image="/home/sys_sgsw/vhost_vm_image.qcow2"
#different linux distributions have different versions of targetcli that have different names for ramdisk option
targetcli_rd_name=""
kernel_vhost_disk="naa.5012345678901234"

function usage()
{
@@ -20,7 +16,6 @@ function usage()
echo "Script for running vhost initiator tests."
echo "Usage: $(basename $1) [-h|--help] [--fiobin=PATH]"
echo "-h, --help Print help and exit"
echo " --vm_image=PATH Path to VM image used in these tests [default=$os_image]"
echo " --fiopath=PATH Path to fio directory on host [default=$FIO_PATH]"
}

@@ -30,7 +25,6 @@ while getopts 'h-:' optchar; do
case "$OPTARG" in
help) usage $0 && exit 0 ;;
fiopath=*) FIO_PATH="${OPTARG#*=}" ;;
vm_image=*) os_image="${OPTARG#*=}" ;;
*) usage $0 echo "Invalid argument '$OPTARG'" && exit 1 ;;
esac
;;
@@ -53,27 +47,8 @@ if [[ $EUID -ne 0 ]]; then
exit 1
fi

if targetcli ls backstores | grep ramdisk ; then
targetcli_rd_name="ramdisk"
elif targetcli ls backstores | grep rd_mcp ; then
targetcli_rd_name="rd_mcp"
else
error "targetcli: cannot create a ramdisk.\
Neither backstores/ramdisk nor backstores/rd_mcp is available"
fi

function remove_kernel_vhost()
{
if targetcli "/vhost/$kernel_vhost_disk ls"; then
targetcli "/vhost delete $kernel_vhost_disk"
fi
if targetcli "/backstores/$targetcli_rd_name/ramdisk ls"; then
targetcli "/backstores/$targetcli_rd_name delete ramdisk"
fi
}

trap 'rm -f *.state $ROOT_DIR/spdk.tar.gz $ROOT_DIR/fio.tar.gz $(get_vhost_dir)/Virtio0;\
remove_kernel_vhost; error_exit "${FUNCNAME}""${LINENO}"' ERR SIGTERM SIGABRT
error_exit "${FUNCNAME}""${LINENO}"' ERR SIGTERM SIGABRT
function run_spdk_fio() {
LD_PRELOAD=$PLUGIN_DIR/fio_plugin $FIO_PATH/fio --ioengine=spdk_bdev\
"$@" --spdk_mem=1024 --spdk_single_seg=1
@@ -131,72 +106,6 @@ run_spdk_fio $INITIATOR_DIR/bdev.fio --filename=$virtio_with_unmap --spdk_conf=$
--spdk_conf=$INITIATOR_DIR/bdev.conf
timing_exit run_spdk_fio_unmap

timing_enter create_kernel_vhost
targetcli "/backstores/$targetcli_rd_name create name=ramdisk size=1GB"
targetcli "/vhost create $kernel_vhost_disk"
targetcli "/vhost/$kernel_vhost_disk/tpg1/luns create /backstores/$targetcli_rd_name/ramdisk"
timing_exit create_kernel_vhost

timing_enter setup_vm
vm_no="0"
vm_setup --disk-type=spdk_vhost_scsi --force=$vm_no --os=$os_image \
--disks="Nvme0n1_scsi0:Malloc0:Malloc1:$kernel_vhost_disk,kernel_vhost:Virtio0,virtio:\
Nvme0n1_blk0,spdk_vhost_blk:Nvme0n1_blk1,spdk_vhost_blk" \
--queue_num=8 --memory=6144
vm_run $vm_no

timing_enter vm_wait_for_boot
vm_wait_for_boot 300 $vm_no
timing_exit vm_wait_for_boot

timing_enter vm_scp_spdk
touch $ROOT_DIR/spdk.tar.gz
tar --exclude="spdk.tar.gz" --exclude="*.o" --exclude="*.d" --exclude=".git" -C $ROOT_DIR -zcf $ROOT_DIR/spdk.tar.gz .
vm_scp $vm_no $ROOT_DIR/spdk.tar.gz "127.0.0.1:/root"
vm_ssh $vm_no "mkdir -p /root/spdk; tar -zxf /root/spdk.tar.gz -C /root/spdk --strip-components=1"

touch $ROOT_DIR/fio.tar.gz
tar --exclude="fio.tar.gz" --exclude="*.o" --exclude="*.d" --exclude=".git" -C $FIO_PATH -zcf $ROOT_DIR/fio.tar.gz .
vm_scp $vm_no $ROOT_DIR/fio.tar.gz "127.0.0.1:/root"
vm_ssh $vm_no "rm -rf /root/fio_src; mkdir -p /root/fio_src; tar -zxf /root/fio.tar.gz -C /root/fio_src --strip-components=1"
timing_exit vm_scp_spdk

timing_enter vm_build_spdk
nproc=$(vm_ssh $vm_no "nproc")
vm_ssh $vm_no " cd /root/fio_src ; make clean ; make -j${nproc} ; make install"
vm_ssh $vm_no " cd spdk ; ./configure --with-fio=/root/fio_src ; make clean ; make -j${nproc}"
timing_exit vm_build_spdk

vm_ssh $vm_no "/root/spdk/scripts/setup.sh"
vbdevs=$(vm_ssh $vm_no ". /root/spdk/test/common/autotest_common.sh && discover_bdevs /root/spdk \
/root/spdk/test/vhost/initiator/bdev_pci.conf")
virtio_bdevs=$(jq -r '[.[].name] | join(":")' <<< $vbdevs)
virtio_with_unmap=$(jq -r '[.[] | select(.supported_io_types.unmap==true).name]
| join(":")' <<< $vbdevs)
timing_exit setup_vm

timing_enter run_spdk_fio_pci
vm_ssh $vm_no "LD_PRELOAD=/root/spdk/examples/bdev/fio_plugin/fio_plugin /root/fio_src/fio --ioengine=spdk_bdev \
/root/spdk/test/vhost/initiator/bdev.fio --filename=$virtio_bdevs --section=job_randwrite \
--section=job_randrw --section=job_write --section=job_rw \
--spdk_conf=/root/spdk/test/vhost/initiator/bdev_pci.conf --spdk_mem=1024 --spdk_single_seg=1"
timing_exit run_spdk_fio_pci

timing_enter run_spdk_fio_pci_unmap
vm_ssh $vm_no "LD_PRELOAD=/root/spdk/examples/bdev/fio_plugin/fio_plugin /root/fio_src/fio --ioengine=spdk_bdev \
/root/spdk/test/vhost/initiator/bdev.fio --filename=$virtio_with_unmap \
--spdk_conf=/root/spdk/test/vhost/initiator/bdev_pci.conf --spdk_mem=1024 --spdk_single_seg=1"
timing_exit run_spdk_fio_pci_unmap

timing_enter vm_shutdown_all
vm_shutdown_all
timing_exit vm_shutdown_all

rm -f *.state $ROOT_DIR/spdk.tar.gz $ROOT_DIR/fio.tar.gz $(get_vhost_dir)/Virtio0
timing_enter remove_kernel_vhost
remove_kernel_vhost
timing_exit remove_kernel_vhost

$RPC_PY delete_nvme_controller Nvme0

timing_enter spdk_vhost_kill