Compare commits

11 commits (author and date columns omitted):

- f49fa23d9a
- 001edfaabf
- 5bf3a5f383
- 7cb8df7a4e
- 727a80b328
- 94f3053de3
- 16c8d766b5
- 494c365eca
- e110a0d1e6
- 5f59d919a4
- f1b747f50f
CHANGELOG.md (+13 lines)

```diff
@@ -1,5 +1,18 @@
 # Changelog
 
+## v18.04.2: Maintenance Release
+
+## v18.04.1: Maintenance Release
+
+SPDK v18.04.1 is a bug fix and maintenance release.
+
+A bug in the blobstore recovery code for thin provisioned blobs has been fixed
+(GitHub issue #291).
+
+The `env_dpdk` environment layer has been updated to work with DPDK 18.05.
+
+The NVMe and bdev `fio_plugin` examples have been updated to work with FIO 3.7.
+
 ## v18.04: Logical Volume Snapshot/Clone, iSCSI Initiator, Bdev QoS, VPP Userspace TCP/IP
 
 ### vhost
```
```diff
@@ -489,7 +489,13 @@ spdk_fio_completion_cb(struct spdk_bdev_io *bdev_io,
 	spdk_bdev_free_io(bdev_io);
 }
 
-static int
+#if FIO_IOOPS_VERSION >= 24
+typedef enum fio_q_status fio_q_status_t;
+#else
+typedef int fio_q_status_t;
+#endif
+
+static fio_q_status_t
 spdk_fio_queue(struct thread_data *td, struct io_u *io_u)
 {
 	int rc = 1;
```
```diff
@@ -416,7 +416,14 @@ spdk_nvme_io_next_sge(void *ref, void **address, uint32_t *length)
 	return 0;
 }
 
-static int spdk_fio_queue(struct thread_data *td, struct io_u *io_u)
+#if FIO_IOOPS_VERSION >= 24
+typedef enum fio_q_status fio_q_status_t;
+#else
+typedef int fio_q_status_t;
+#endif
+
+static fio_q_status_t
+spdk_fio_queue(struct thread_data *td, struct io_u *io_u)
 {
 	int rc = 1;
 	struct spdk_fio_thread *fio_thread = td->io_ops_data;
```
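FIO 3.7 bumped `FIO_IOOPS_VERSION` to 24 and changed the ioengine `.queue` handler to return `enum fio_q_status` instead of `int`. Both hunks above apply the same compatibility pattern: a version-gated typedef lets a single `spdk_fio_queue()` definition compile against either ABI. A minimal sketch of the pattern in isolation (`example_queue` is a hypothetical handler, not SPDK code):

```c
#include "fio.h"	/* from the fio source tree: FIO_IOOPS_VERSION, thread_data, io_u */

/* FIO 3.7 (FIO_IOOPS_VERSION 24) switched the .queue callback from int to
 * enum fio_q_status; the FIO_Q_* values keep the same numeric meaning, so
 * only the declared return type has to vary.
 */
#if FIO_IOOPS_VERSION >= 24
typedef enum fio_q_status fio_q_status_t;
#else
typedef int fio_q_status_t;
#endif

static fio_q_status_t
example_queue(struct thread_data *td, struct io_u *io_u)
{
	/* ... hand io_u to the backend ... */
	return FIO_Q_QUEUED;	/* valid under both declarations */
}
```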
```diff
@@ -54,12 +54,12 @@
  * Patch level is incremented on maintenance branch releases and reset to 0 for each
  * new major.minor release.
  */
-#define SPDK_VERSION_PATCH 0
+#define SPDK_VERSION_PATCH 2
 
 /**
  * Version string suffix.
  */
-#define SPDK_VERSION_SUFFIX ""
+#define SPDK_VERSION_SUFFIX "-pre"
 
 /**
  * Single numeric value representing a version number for compile-time comparisons.
```
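The patch level jumps to 2 with a `-pre` suffix, marking the branch as work in progress toward v18.04.2 (whose changelog section above is still empty). Downstream code that must straddle the fix can gate on the component macros. A hedged sketch, assuming `SPDK_VERSION_MAJOR` and `SPDK_VERSION_MINOR` are defined alongside `SPDK_VERSION_PATCH` in the same header (only the patch macro appears in this hunk):

```c
#include "spdk/version.h"

/* Compile-time gate on the maintenance release carrying the blobstore
 * recovery fix; macro names other than SPDK_VERSION_PATCH are assumptions.
 */
#if SPDK_VERSION_MAJOR > 18 || \
	(SPDK_VERSION_MAJOR == 18 && SPDK_VERSION_MINOR > 4) || \
	(SPDK_VERSION_MAJOR == 18 && SPDK_VERSION_MINOR == 4 && SPDK_VERSION_PATCH >= 2)
#define HAVE_USED_CLUSTER_RECOVERY_FIX 1
#endif
```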
```diff
@@ -566,7 +566,7 @@ _spdk_blob_serialize_extent(const struct spdk_blob *blob,
 	struct spdk_blob_md_descriptor_extent *desc;
 	size_t cur_sz;
 	uint64_t i, extent_idx;
-	uint32_t lba, lba_per_cluster, lba_count;
+	uint64_t lba, lba_per_cluster, lba_count;
 
 	/* The buffer must have room for at least one extent */
 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]);
```
```diff
@@ -2479,7 +2479,7 @@ static void
 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 {
 	struct spdk_bs_load_ctx *ctx = cb_arg;
-	uint32_t i, j;
+	uint32_t i;
 	int rc;
 
 	/* The type must be correct */
```
```diff
@@ -2500,13 +2500,9 @@ _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 		return;
 	}
 
-	for (i = 0; i < ctx->mask->length / 8; i++) {
-		uint8_t segment = ctx->mask->mask[i];
-		for (j = 0; segment; j++) {
-			if (segment & 1U) {
-				spdk_bit_array_set(ctx->bs->used_blobids, (i * 8) + j);
-			}
-			segment >>= 1U;
+	for (i = 0; i < ctx->mask->length; i++) {
+		if (ctx->mask->mask[i / 8] & (1U << (i % 8))) {
+			spdk_bit_array_set(ctx->bs->used_blobids, i);
 		}
 	}
 
```
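The rewrite (repeated below for the used-cluster and used-metadata-page masks) fixes the replay of the on-disk mask: `ctx->mask->length` counts bits, but the old loop iterated `length / 8` whole bytes, so set bits in a trailing partial byte were silently dropped. Indexing bits directly makes the bound exact. A self-contained illustration of the difference:

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* length is in bits: 10 valid bits spread over 2 bytes. The old
	 * byte-based loop ran length / 8 == 1 iteration and never saw bit 9.
	 */
	uint8_t mask[2] = { 0x81, 0x02 };	/* bits 0, 7 and 9 set */
	uint32_t length = 10;
	uint32_t i;

	for (i = 0; i < length; i++) {
		if (mask[i / 8] & (1U << (i % 8))) {
			printf("bit %u set\n", i);	/* prints 0, 7, 9 */
		}
	}
	return 0;
}
```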
```diff
@@ -2518,7 +2514,7 @@ _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 {
 	struct spdk_bs_load_ctx *ctx = cb_arg;
 	uint64_t lba, lba_count, mask_size;
-	uint32_t i, j;
+	uint32_t i;
 	int rc;
 
 	/* The type must be correct */
```
```diff
@@ -2537,15 +2533,11 @@ _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 	}
 
 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
-	for (i = 0; i < ctx->mask->length / 8; i++) {
-		uint8_t segment = ctx->mask->mask[i];
-		for (j = 0; segment && (j < 8); j++) {
-			if (segment & 1U) {
-				spdk_bit_array_set(ctx->bs->used_clusters, (i * 8) + j);
-				assert(ctx->bs->num_free_clusters > 0);
-				ctx->bs->num_free_clusters--;
-			}
-			segment >>= 1U;
+	for (i = 0; i < ctx->mask->length; i++) {
+		if (ctx->mask->mask[i / 8] & (1U << (i % 8))) {
+			spdk_bit_array_set(ctx->bs->used_clusters, i);
+			assert(ctx->bs->num_free_clusters > 0);
+			ctx->bs->num_free_clusters--;
 		}
 	}
 
```
```diff
@@ -2569,7 +2561,7 @@ _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 {
 	struct spdk_bs_load_ctx *ctx = cb_arg;
 	uint64_t lba, lba_count, mask_size;
-	uint32_t i, j;
+	uint32_t i;
 	int rc;
 
 	/* The type must be correct */
```
```diff
@@ -2587,13 +2579,9 @@ _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 		return;
 	}
 
-	for (i = 0; i < ctx->mask->length / 8; i++) {
-		uint8_t segment = ctx->mask->mask[i];
-		for (j = 0; segment && (j < 8); j++) {
-			if (segment & 1U) {
-				spdk_bit_array_set(ctx->bs->used_md_pages, (i * 8) + j);
-			}
-			segment >>= 1U;
+	for (i = 0; i < ctx->mask->length; i++) {
+		if (ctx->mask->mask[i / 8] & (1U << (i % 8))) {
+			spdk_bit_array_set(ctx->bs->used_md_pages, i);
 		}
 	}
 	spdk_dma_free(ctx->mask);
```
```diff
@@ -2648,16 +2636,24 @@ _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct
 		struct spdk_blob_md_descriptor_extent *desc_extent;
 		unsigned int i, j;
 		unsigned int cluster_count = 0;
+		uint32_t cluster_idx;
 
 		desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;
 
 		for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
 			for (j = 0; j < desc_extent->extents[i].length; j++) {
-				spdk_bit_array_set(bs->used_clusters, desc_extent->extents[i].cluster_idx + j);
-				if (bs->num_free_clusters == 0) {
-					return -1;
+				cluster_idx = desc_extent->extents[i].cluster_idx;
+				/*
+				 * cluster_idx = 0 means an unallocated cluster - don't mark that
+				 * in the used cluster map.
+				 */
+				if (cluster_idx != 0) {
+					spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
+					if (bs->num_free_clusters == 0) {
+						return -1;
+					}
+					bs->num_free_clusters--;
 				}
-				bs->num_free_clusters--;
 				cluster_count++;
 			}
 		}
```
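This is the substantive fix for GitHub issue #291. A thin-provisioned blob's extent descriptor uses `cluster_idx == 0` as a placeholder for clusters that were reserved but never written, so the power-fail replay path must not mark cluster 0 as used or decrement the free count for it. The entry being replayed has roughly this shape (field names taken from the hunk, layout simplified for illustration):

```c
/* Simplified shape of one run-length extent entry in the blob metadata.
 * A thin-provisioned blob that reserved three clusters but wrote none of
 * them carries { .cluster_idx = 0, .length = 3 }, which replay now skips.
 */
struct extent_rle {
	uint32_t cluster_idx;	/* first backing cluster; 0 == unallocated */
	uint32_t length;	/* run length, in clusters */
};
```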
```diff
@@ -170,7 +170,7 @@ struct spdk_blob_store {
 	uint64_t total_clusters;
 	uint64_t total_data_clusters;
 	uint64_t num_free_clusters;
-	uint32_t pages_per_cluster;
+	uint64_t pages_per_cluster;
 
 	spdk_blob_id super_blob;
 	struct spdk_bs_type bstype;
```
```diff
@@ -399,7 +399,7 @@ _spdk_bs_dev_page_to_lba(struct spdk_bs_dev *bs_dev, uint64_t page)
 	return page * SPDK_BS_PAGE_SIZE / bs_dev->blocklen;
 }
 
-static inline uint32_t
+static inline uint64_t
 _spdk_bs_lba_to_page(struct spdk_blob_store *bs, uint64_t lba)
 {
 	uint64_t lbas_per_page;
```
```diff
@@ -426,7 +426,7 @@ _spdk_bs_dev_lba_to_page(struct spdk_bs_dev *bs_dev, uint64_t lba)
 static inline uint64_t
 _spdk_bs_cluster_to_page(struct spdk_blob_store *bs, uint32_t cluster)
 {
-	return cluster * bs->pages_per_cluster;
+	return (uint64_t)cluster * bs->pages_per_cluster;
 }
 
 static inline uint32_t
```
```diff
@@ -440,7 +440,7 @@ _spdk_bs_page_to_cluster(struct spdk_blob_store *bs, uint64_t page)
 static inline uint64_t
 _spdk_bs_cluster_to_lba(struct spdk_blob_store *bs, uint32_t cluster)
 {
-	return cluster * (bs->cluster_sz / bs->dev->blocklen);
+	return (uint64_t)cluster * (bs->cluster_sz / bs->dev->blocklen);
 }
 
 static inline uint32_t
```
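The two casts, together with the struct-field and return-type widenings around them, close the same hole: `cluster`, `pages_per_cluster`, and `cluster_sz / blocklen` were all 32-bit, so the multiplications wrapped modulo 2^32 before the result was widened for the 64-bit caller, which breaks blobstores with enough clusters. A standalone demonstration:

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t cluster = 5000000;		/* large cluster index */
	uint32_t lba_per_cluster = 2048;	/* 1 MiB clusters, 512 B blocks */

	/* uint32_t * uint32_t is evaluated in 32-bit arithmetic and wraps
	 * before the assignment widens it; the cast forces a 64-bit multiply.
	 */
	uint64_t wrong = cluster * lba_per_cluster;
	uint64_t right = (uint64_t)cluster * lba_per_cluster;

	printf("wrong = %llu\n", (unsigned long long)wrong);	/* 1650065408 */
	printf("right = %llu\n", (unsigned long long)right);	/* 10240000000 */
	return 0;
}
```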
```diff
@@ -465,7 +465,7 @@ _spdk_bs_blob_lba_from_back_dev_lba(struct spdk_blob *blob, uint64_t lba)
 
 /* End basic conversions */
 
-static inline uint32_t
+static inline uint64_t
 _spdk_bs_blobid_to_page(spdk_blob_id id)
 {
 	return id & 0xFFFFFFFF;
```
```diff
@@ -476,8 +476,11 @@ _spdk_bs_blobid_to_page(spdk_blob_id id)
  * code assumes blob id == page_idx.
  */
 static inline spdk_blob_id
-_spdk_bs_page_to_blobid(uint32_t page_idx)
+_spdk_bs_page_to_blobid(uint64_t page_idx)
 {
+	if (page_idx > UINT32_MAX) {
+		return SPDK_BLOBID_INVALID;
+	}
 	return SPDK_BLOB_BLOBID_HIGH_BIT | page_idx;
 }
 
```
```diff
@@ -485,10 +488,10 @@ _spdk_bs_page_to_blobid(uint32_t page_idx)
  * start of that page.
  */
 static inline uint64_t
-_spdk_bs_blob_page_to_lba(struct spdk_blob *blob, uint32_t page)
+_spdk_bs_blob_page_to_lba(struct spdk_blob *blob, uint64_t page)
 {
 	uint64_t lba;
-	uint32_t pages_per_cluster;
+	uint64_t pages_per_cluster;
 
 	pages_per_cluster = blob->bs->pages_per_cluster;
 
```
```diff
@@ -504,9 +507,9 @@ _spdk_bs_blob_page_to_lba(struct spdk_blob *blob, uint32_t page)
  * next cluster boundary.
  */
 static inline uint32_t
-_spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob *blob, uint32_t page)
+_spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob *blob, uint64_t page)
 {
-	uint32_t pages_per_cluster;
+	uint64_t pages_per_cluster;
 
 	pages_per_cluster = blob->bs->pages_per_cluster;
 
```
```diff
@@ -515,9 +518,9 @@ _spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob *blob, uint32_t page)
 
 /* Given a page offset into a blob, look up the number of pages into blob to beginning of current cluster */
 static inline uint32_t
-_spdk_bs_page_to_cluster_start(struct spdk_blob *blob, uint32_t page)
+_spdk_bs_page_to_cluster_start(struct spdk_blob *blob, uint64_t page)
 {
-	uint32_t pages_per_cluster;
+	uint64_t pages_per_cluster;
 
 	pages_per_cluster = blob->bs->pages_per_cluster;
 
```
```diff
@@ -526,10 +529,10 @@ _spdk_bs_page_to_cluster_start(struct spdk_blob *blob, uint32_t page)
 
 /* Given a page offset into a blob, look up if it is from allocated cluster. */
 static inline bool
-_spdk_bs_page_is_allocated(struct spdk_blob *blob, uint32_t page)
+_spdk_bs_page_is_allocated(struct spdk_blob *blob, uint64_t page)
 {
 	uint64_t lba;
-	uint32_t pages_per_cluster;
+	uint64_t pages_per_cluster;
 
 	pages_per_cluster = blob->bs->pages_per_cluster;
 
```
```diff
@@ -80,7 +80,8 @@ endif
 
 DPDK_LIB = $(DPDK_LIB_LIST:%=$(DPDK_ABS_DIR)/lib/lib%$(DPDK_LIB_EXT))
 
-ENV_CFLAGS = $(DPDK_INC)
+# SPDK memory registration requires experimental (deprecated) rte_memory API for DPDK 18.05
+ENV_CFLAGS = $(DPDK_INC) -Wno-deprecated-declarations
 ENV_CXXFLAGS = $(ENV_CFLAGS)
 ENV_DPDK_FILE = $(call spdk_lib_list_to_files,env_dpdk)
 ENV_LIBS = $(ENV_DPDK_FILE) $(DPDK_LIB)
```
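DPDK 18.05 tags the memory registration interfaces used below (`rte_mem_event_callback_register`, `rte_memseg_contig_walk`) as experimental, and calling them produces deprecated-declaration warnings that would fail a warnings-as-errors build; the new flag silences only that diagnostic class. The mechanism reduced to a toy (`legacy_register` is hypothetical):

```c
/* Compile with: gcc -Wall -Werror -c toy.c                                -> fails
 *          vs.: gcc -Wall -Werror -Wno-deprecated-declarations -c toy.c  -> builds
 */
__attribute__((deprecated)) void legacy_register(void *addr, unsigned long len);

void
register_region(void *addr, unsigned long len)
{
	legacy_register(addr, len);	/* -Wdeprecated-declarations fires here */
}
```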
```diff
@@ -498,12 +498,29 @@ spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr)
 	return map_2mb->translation_2mb;
 }
 
+#if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0)
+static void
+memory_hotplug_cb(enum rte_mem_event event_type,
+		  const void *addr, size_t len, void *arg)
+{
+	if (event_type == RTE_MEM_EVENT_ALLOC) {
+		spdk_mem_register((void *)addr, len);
+	} else if (event_type == RTE_MEM_EVENT_FREE) {
+		spdk_mem_unregister((void *)addr, len);
+	}
+}
+
+static int
+memory_iter_cb(const struct rte_memseg_list *msl,
+	       const struct rte_memseg *ms, size_t len, void *arg)
+{
+	return spdk_mem_register(ms->addr, len);
+}
+#endif
+
 int
 spdk_mem_map_init(void)
 {
-	struct rte_mem_config *mcfg;
-	size_t seg_idx;
-
 	g_mem_reg_map = spdk_mem_map_alloc(0, NULL, NULL);
 	if (g_mem_reg_map == NULL) {
 		DEBUG_PRINT("memory registration map allocation failed\n");
```
```diff
@@ -514,8 +531,14 @@ spdk_mem_map_init(void)
 	/*
 	 * Walk all DPDK memory segments and register them
 	 * with the master memory map
 	 */
-	mcfg = rte_eal_get_configuration()->mem_config;
+#if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0)
+	rte_mem_event_callback_register("spdk", memory_hotplug_cb, NULL);
+	rte_memseg_contig_walk(memory_iter_cb, NULL);
+#else
+	struct rte_mem_config *mcfg;
+	size_t seg_idx;
+
+	mcfg = rte_eal_get_configuration()->mem_config;
 	for (seg_idx = 0; seg_idx < RTE_MAX_MEMSEG; seg_idx++) {
 		struct rte_memseg *seg = &mcfg->memseg[seg_idx];
```
```diff
@@ -525,5 +548,6 @@ spdk_mem_map_init(void)
 
 		spdk_mem_register(seg->addr, seg->len);
 	}
+#endif
 	return 0;
 }
```
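Background for the three hunks above: before 18.05, DPDK carved out all hugepage memory at EAL init into a fixed `mcfg->memseg[RTE_MAX_MEMSEG]` array, so one walk at startup sufficed. 18.05 made memory dynamic (segments may be allocated and freed while the application runs), hence the one-time `rte_memseg_contig_walk()` plus a hotplug callback to keep the registration map current. A sketch of another walk consumer, assuming the 18.05 contract that a non-zero callback return stops the walk and is propagated:

```c
#include <rte_memory.h>

static int
count_bytes_cb(const struct rte_memseg_list *msl,
	       const struct rte_memseg *ms, size_t len, void *arg)
{
	(void)msl;
	(void)ms;
	*(size_t *)arg += len;	/* accumulate contiguous chunk sizes */
	return 0;		/* non-zero would abort the walk */
}

static size_t
total_contig_bytes(void)
{
	size_t total = 0;

	rte_memseg_contig_walk(count_bytes_cb, &total);
	return total;
}
```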
```diff
@@ -214,12 +214,23 @@ static uint64_t
 vtophys_get_paddr_memseg(uint64_t vaddr)
 {
 	uintptr_t paddr;
-	struct rte_mem_config *mcfg;
 	struct rte_memseg *seg;
-	uint32_t seg_idx;
 
+#if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0)
+	seg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
+	if (seg != NULL) {
+		paddr = seg->phys_addr;
+		if (paddr == RTE_BAD_IOVA) {
+			return SPDK_VTOPHYS_ERROR;
+		}
+		paddr += (vaddr - (uintptr_t)seg->addr);
+		return paddr;
+	}
+#else
+	struct rte_mem_config *mcfg;
+	uint32_t seg_idx;
+
 	mcfg = rte_eal_get_configuration()->mem_config;
-
 	for (seg_idx = 0; seg_idx < RTE_MAX_MEMSEG; seg_idx++) {
 		seg = &mcfg->memseg[seg_idx];
 		if (seg->addr == NULL) {
```
```diff
@@ -240,6 +251,7 @@ vtophys_get_paddr_memseg(uint64_t vaddr)
 			return paddr;
 		}
 	}
+#endif
 
 	return SPDK_VTOPHYS_ERROR;
 }
```
```diff
@@ -347,6 +347,7 @@ spdk_app_setup_env(struct spdk_app_opts *opts)
 	env_opts.mem_channel = opts->mem_channel;
 	env_opts.master_core = opts->master_core;
 	env_opts.mem_size = opts->mem_size;
+	env_opts.hugepage_single_segments = opts->hugepage_single_segments;
 	env_opts.no_pci = opts->no_pci;
 
 	rc = spdk_env_init(&env_opts);
```
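A one-line plumbing fix: applications built on the event framework previously had no way to reach the env layer's `hugepage_single_segments` knob, because `spdk_app_setup_env()` dropped it when translating `spdk_app_opts` into `spdk_env_opts`. A hypothetical application snippet once it is forwarded (names taken from the hunk and SPDK's app framework of this era):

```c
#include "spdk/event.h"

static void
app_start_cb(void *arg1, void *arg2)
{
	/* application is up; do work here */
}

int
main(int argc, char **argv)
{
	struct spdk_app_opts opts;

	spdk_app_opts_init(&opts);
	opts.name = "example";
	opts.hugepage_single_segments = true;	/* now forwarded to spdk_env_init() */

	return spdk_app_start(&opts, app_start_cb, NULL, NULL);
}
```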
```diff
@@ -226,6 +226,14 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max)
 			SPDK_ERRLOG("Exceed maximum of %d\n", max);
 			goto error;
 		}
+
+		if (idx > 0 &&
+		    strncmp(tmp, huges[idx - 1].path, PATH_MAX) == 0 &&
+		    v_start == huges[idx - 1].addr + huges[idx - 1].size) {
+			huges[idx - 1].size += (v_end - v_start);
+			continue;
+		}
+
 		huges[idx].addr = v_start;
 		huges[idx].size = v_end - v_start;
 		snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
```
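With DPDK 18.05's single-file-segments mode, `/proc/self/maps` can show several back-to-back mappings of the same hugepage file, while vhost memory setup wants one record per file; the added condition coalesces virtually contiguous neighbors with the same path. The same logic lifted into a toy helper (struct layout assumed from the hunk):

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hugepage_file_info {
	uint64_t addr;		/* virtual start address */
	uint64_t size;		/* mapping size in bytes */
	char path[PATH_MAX];	/* backing hugepage file */
};

/* Returns the new record count: either merges into huges[idx - 1] or
 * appends at huges[idx], mirroring the continue-vs-fallthrough above.
 */
static int
append_or_merge(struct hugepage_file_info huges[], int idx,
		uint64_t v_start, uint64_t v_end, const char *tmp)
{
	if (idx > 0 &&
	    strncmp(tmp, huges[idx - 1].path, PATH_MAX) == 0 &&
	    v_start == huges[idx - 1].addr + huges[idx - 1].size) {
		huges[idx - 1].size += (v_end - v_start);
		return idx;
	}

	huges[idx].addr = v_start;
	huges[idx].size = v_end - v_start;
	snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
	return idx + 1;
}
```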
test/env/memory/memory_ut.c (+10 lines)

```diff
@@ -50,6 +50,16 @@ rte_eal_get_configuration(void)
 	return &g_cfg;
 }
 
+#if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0)
+typedef void (*rte_mem_event_callback_t)(enum rte_mem_event event_type,
+		const void *addr, size_t len, void *arg);
+typedef int (*rte_memseg_contig_walk_t)(const struct rte_memseg_list *msl,
+		const struct rte_memseg *ms, size_t len, void *arg);
+DEFINE_STUB(rte_mem_event_callback_register, int, (const char *name, rte_mem_event_callback_t clb,
+	    void *arg), 0);
+DEFINE_STUB(rte_memseg_contig_walk, int, (rte_memseg_contig_walk_t func, void *arg), 0);
+#endif
+
 #define PAGE_ARRAY_SIZE (100)
 static struct spdk_bit_array *g_page_array;
```
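The unit test links `memory.c` against fake DPDK symbols, so the two functions the 18.05 path now calls must exist at link time. `DEFINE_STUB` comes from SPDK's test harness; in spirit it expands to roughly the following (a simplified sketch, not the real macro, which also supports inspecting and overriding return values):

```c
/* Simplified stand-in for SPDK's DEFINE_STUB(fn, ret, dargs, val): emit a
 * definition with the requested signature that returns a fixed value.
 */
#define DEFINE_STUB(fn, ret, dargs, val) \
	ret fn dargs			 \
	{				 \
		return val;		 \
	}

/* DEFINE_STUB(rte_memseg_contig_walk, int,
 *             (rte_memseg_contig_walk_t func, void *arg), 0)
 * then expands to:
 *
 *   int rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
 *   { return 0; }
 */
```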
```diff
@@ -524,9 +524,10 @@ blob_thin_provision(void)
 	spdk_blob_close(blob, blob_op_complete, NULL);
 	CU_ASSERT(g_bserrno == 0);
 
-	spdk_bs_unload(g_bs, bs_op_complete, NULL);
-	CU_ASSERT(g_bserrno == 0);
-	g_bs = NULL;
+	/* Do not shut down cleanly. This makes sure that when we load again
+	 * and try to recover a valid used_cluster map, that blobstore will
+	 * ignore clusters with index 0 since these are unallocated clusters.
+	 */
 
 	/* Load an existing blob store and check if invalid_flags is set */
 	dev = init_dev();
```