lib/ftl: Track cached lba map segments
Keep track of read LBA map segments to avoid unnecessary traffic in case of ANM events. The LBA map is divided into 4KB segments, each of which can store 512 LBA entries. In case of multiple read requests on the same segment, keep a pending request list and process it in the read_lba_map() completion callback.

Change-Id: I2661bdb716ab7c975140e0b37aebcb17aa23901d
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/453371
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent 7729225fa8
commit d23cc88d73
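As a quick illustration of the sizing described in the commit message: with 4096-byte blocks and 8-byte LBA entries, one LBA map segment holds 512 entries, and a read covering a range of map entries is broken down into the segments it touches. The sketch below is not part of the commit; it is a self-contained example in which the FTL_BLOCK_SIZE value of 4096 is assumed and the two helpers are made up for illustration only.

/* Illustration only, not part of the commit: segment arithmetic for the LBA map. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define FTL_BLOCK_SIZE       4096                                /* assumed block size */
#define FTL_NUM_LBA_IN_BLOCK (FTL_BLOCK_SIZE / sizeof(uint64_t)) /* 512 entries per 4KB segment */

/* Index of the 4KB segment that holds a given LBA map entry (hypothetical helper). */
static size_t
lba_map_segment_index(size_t entry)
{
        return entry / FTL_NUM_LBA_IN_BLOCK;
}

/* Number of segments a read of lba_cnt entries starting at offset touches (hypothetical helper). */
static size_t
lba_map_segment_count(size_t offset, size_t lba_cnt)
{
        size_t first = lba_map_segment_index(offset);
        size_t last = lba_map_segment_index(offset + lba_cnt - 1);

        return last - first + 1;
}

int
main(void)
{
        assert(FTL_NUM_LBA_IN_BLOCK == 512);
        assert(lba_map_segment_index(511) == 0);       /* entries 0-511 share segment 0 */
        assert(lba_map_segment_index(512) == 1);       /* entry 512 starts segment 1 */
        assert(lba_map_segment_count(600, 1024) == 3); /* entries 600-1623 span segments 1-3 */
        return 0;
}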
@@ -154,7 +154,8 @@ ftl_band_clear_lba_map(struct ftl_band *band)
 	struct ftl_lba_map *lba_map = &band->lba_map;
 
 	spdk_bit_array_clear_mask(lba_map->vld);
-	memset(lba_map->map, 0, ftl_num_band_lbks(band->dev) * sizeof(uint64_t));
+	memset(lba_map->map, 0, ftl_lba_map_pool_elem_size(band->dev));
 
 	lba_map->num_vld = 0;
 }
@@ -175,6 +176,7 @@ ftl_band_free_lba_map(struct ftl_band *band)
 		assert(ftl_band_validate_md(band) == true);
 	}
 
+	memset(lba_map->map, 0, ftl_lba_map_pool_elem_size(band->dev));
 	spdk_mempool_put(dev->lba_pool, lba_map->map);
 	spdk_dma_free(lba_map->dma_buf);
 	lba_map->map = NULL;
@@ -409,7 +411,7 @@ ftl_unpack_head_md(struct ftl_band *band)
 struct ftl_ppa
 ftl_band_tail_md_ppa(struct ftl_band *band)
 {
-	struct ftl_ppa ppa;
+	struct ftl_ppa ppa = {};
 	struct ftl_chunk *chunk;
 	struct spdk_ftl_dev *dev = band->dev;
 	size_t xfer_size = dev->xfer_size;
@@ -696,6 +698,8 @@ ftl_band_alloc_lba_map(struct ftl_band *band)
 		return -1;
 	}
 
+	lba_map->segments = (char *)lba_map->map + ftl_lba_map_num_lbks(dev) * FTL_BLOCK_SIZE;
+
 	lba_map->dma_buf = spdk_dma_zmalloc(ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE,
 					    FTL_BLOCK_SIZE, NULL);
 	if (!lba_map->dma_buf) {
@@ -737,8 +741,8 @@ ftl_read_md_cb(struct ftl_io *io, void *arg, int status)
 
 static struct ftl_md_io *
 ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
-		    struct ftl_band *band, size_t lbk_cnt, ftl_io_fn fn,
-		    ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
+		    struct ftl_band *band, size_t lbk_cnt, void *buf,
+		    ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
 {
 	struct ftl_md_io *io;
 	struct ftl_io_init_opts opts = {
@@ -751,7 +755,7 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
 		.type = FTL_IO_READ,
 		.lbk_cnt = lbk_cnt,
 		.cb_fn = fn,
-		.data = band->lba_map.dma_buf,
+		.data = buf,
 	};
 
 	io = (struct ftl_md_io *)ftl_io_init_internal(&opts);
@@ -840,7 +844,7 @@ ftl_band_lba_map_ppa(struct ftl_band *band, size_t offset)
 
 static int
 ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa,
-		 ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
+		 void *buf, ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
 {
 	struct spdk_ftl_dev *dev = band->dev;
 	struct ftl_md_io *io;
@@ -849,7 +853,7 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa
 		return -ENOENT;
 	}
 
-	io = ftl_io_init_md_read(dev, start_ppa, band, lbk_cnt, fn, pack_fn, cb_fn, cb_ctx);
+	io = ftl_io_init_md_read(dev, start_ppa, band, lbk_cnt, buf, fn, pack_fn, cb_fn, cb_ctx);
 	if (!io) {
 		return -ENOMEM;
 	}
@@ -861,10 +865,64 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa
 int
 ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa ppa, ftl_io_fn cb_fn, void *cb_ctx)
 {
-	return ftl_band_read_md(band, ftl_tail_md_num_lbks(band->dev), ppa,
+	return ftl_band_read_md(band, ftl_tail_md_num_lbks(band->dev), ppa, band->lba_map.dma_buf,
				ftl_read_md_cb, ftl_unpack_tail_md, cb_fn, cb_ctx);
 }
 
+static size_t
+ftl_lba_map_request_segment_done(struct ftl_lba_map_request *request, size_t offset,
+				 size_t num_segments)
+{
+	size_t i, num_done = 0;
+
+	for (i = offset; i < offset + num_segments; ++i) {
+		if (spdk_bit_array_get(request->segments, i)) {
+			spdk_bit_array_clear(request->segments, offset);
+			num_done++;
+		}
+	}
+
+	assert(request->num_pending >= num_done);
+	request->num_pending -= num_done;
+
+	return num_done;
+}
+
+static void
+ftl_lba_map_set_segment_state(struct ftl_lba_map *lba_map, size_t offset, size_t num_segments,
+			      enum ftl_lba_map_seg_state state)
+{
+	size_t i;
+
+	for (i = offset; i < offset + num_segments; ++i) {
+		lba_map->segments[i] = state;
+	}
+}
+
+static void
+ftl_lba_map_request_free(struct spdk_ftl_dev *dev, struct ftl_lba_map_request *request)
+{
+	spdk_bit_array_clear_mask(request->segments);
+	spdk_mempool_put(dev->lba_request_pool, request);
+}
+
+static void
+ftl_process_lba_map_requests(struct spdk_ftl_dev *dev, struct ftl_lba_map *lba_map, size_t offset,
+			     size_t num_segments, int status)
+{
+	struct ftl_lba_map_request *request, *trequest;
+	size_t num_done;
+
+	LIST_FOREACH_SAFE(request, &lba_map->request_list, list_entry, trequest) {
+		num_done = ftl_lba_map_request_segment_done(request, offset, num_segments);
+		if (request->num_pending == 0 || (status && num_done)) {
+			request->cb(NULL, request->cb_ctx, status);
+			LIST_REMOVE(request, list_entry);
+			ftl_lba_map_request_free(dev, request);
+		}
+	}
+}
+
 static size_t
 ftl_lba_map_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
 {
@@ -880,34 +938,118 @@ ftl_lba_map_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
 static void
 ftl_read_lba_map_cb(struct ftl_io *io, void *arg, int status)
 {
-	struct ftl_md_io *md_io = (struct ftl_md_io *)io;
 	struct ftl_lba_map *lba_map = &io->band->lba_map;
-	uint64_t offset;
+	uint64_t lbk_off;
 
-	offset = ftl_lba_map_offset_from_ppa(io->band, io->ppa);
-	assert(offset + io->lbk_cnt <= ftl_lba_map_num_lbks(io->dev));
+	lbk_off = ftl_lba_map_offset_from_ppa(io->band, io->ppa);
+	assert(lbk_off + io->lbk_cnt <= ftl_lba_map_num_lbks(io->dev));
 
 	if (!status) {
-		memcpy((char *)lba_map->map + offset * FTL_BLOCK_SIZE, lba_map->dma_buf,
+		memcpy((char *)lba_map->map + lbk_off * FTL_BLOCK_SIZE,
+		       io->iov[0].iov_base,
		       io->lbk_cnt * FTL_BLOCK_SIZE);
+
+		ftl_lba_map_set_segment_state(lba_map, lbk_off, io->lbk_cnt,
+					      FTL_LBA_MAP_SEG_CACHED);
 	}
 
-	md_io->cb_fn(io, md_io->cb_ctx, status);
+	ftl_process_lba_map_requests(io->dev, lba_map, lbk_off, io->lbk_cnt, status);
 }
 
+static struct ftl_lba_map_request *
+ftl_lba_map_alloc_request(struct ftl_band *band, size_t offset, size_t num_segments,
+			  ftl_io_fn cb, void *cb_ctx)
+{
+	struct ftl_lba_map_request *request;
+	struct spdk_ftl_dev *dev = band->dev;
+	size_t i;
+
+	request = spdk_mempool_get(dev->lba_request_pool);
+	if (!request) {
+		return NULL;
+	}
+
+	request->cb = cb;
+	request->cb_ctx = cb_ctx;
+	request->num_pending = num_segments;
+
+	for (i = offset; i < offset + num_segments; ++i) {
+		spdk_bit_array_set(request->segments, i);
+	}
+
+	return request;
+}
+
+static size_t
+ftl_lba_map_num_clear_segments(struct ftl_lba_map *lba_map,
+			       size_t offset, size_t num_segments)
+{
+	size_t i, cnt = 0;
+
+	for (i = offset; i < offset + num_segments; ++i) {
+		if (lba_map->segments[i] != FTL_LBA_MAP_SEG_CLEAR) {
+			break;
+		}
+		cnt++;
+	}
+
+	return cnt;
+}
+
 int
 ftl_band_read_lba_map(struct ftl_band *band, size_t offset, size_t lba_cnt,
		      ftl_io_fn cb_fn, void *cb_ctx)
 {
-	size_t lbk_cnt, lbk_off;
-
-	lbk_off = offset * sizeof(uint64_t) / FTL_BLOCK_SIZE;
-	lbk_cnt = spdk_divide_round_up(lba_cnt * sizeof(uint64_t), FTL_BLOCK_SIZE);
+	size_t lbk_cnt, lbk_off, num_read, num_segments;
+	struct ftl_lba_map *lba_map = &band->lba_map;
+	struct ftl_lba_map_request *request;
+	int rc = 0;
 
+	lbk_off = offset / FTL_NUM_LBA_IN_BLOCK;
+	num_segments = spdk_divide_round_up(offset + lba_cnt, FTL_NUM_LBA_IN_BLOCK);
+	lbk_cnt = num_segments - lbk_off;
 	assert(lbk_off + lbk_cnt <= ftl_lba_map_num_lbks(band->dev));
 
-	return ftl_band_read_md(band, lbk_cnt, ftl_band_lba_map_ppa(band, lbk_off),
-				ftl_read_lba_map_cb, NULL, cb_fn, cb_ctx);
+	request = ftl_lba_map_alloc_request(band, lbk_off, lbk_cnt, cb_fn, cb_ctx);
+	if (!request) {
+		return -ENOMEM;
+	}
+
+	while (lbk_cnt) {
+		if (lba_map->segments[lbk_off] != FTL_LBA_MAP_SEG_CLEAR) {
+			if (lba_map->segments[lbk_off] == FTL_LBA_MAP_SEG_CACHED) {
+				ftl_lba_map_request_segment_done(request, lbk_off, 1);
+			}
+			lbk_cnt--;
+			lbk_off++;
+			continue;
+		}
+
+		num_read = ftl_lba_map_num_clear_segments(lba_map, lbk_off, lbk_cnt);
+		ftl_lba_map_set_segment_state(lba_map, lbk_off, num_read,
+					      FTL_LBA_MAP_SEG_PENDING);
+
+		rc = ftl_band_read_md(band, num_read, ftl_band_lba_map_ppa(band, lbk_off),
+				      (char *)band->lba_map.dma_buf + lbk_off * FTL_BLOCK_SIZE,
+				      ftl_read_lba_map_cb, NULL, cb_fn, cb_ctx);
+		if (rc) {
+			ftl_lba_map_request_free(band->dev, request);
+			return rc;
+		}
+
+		assert(lbk_cnt >= num_read);
+		lbk_cnt -= num_read;
+		lbk_off += num_read;
+	}
+
+	if (request->num_pending) {
+		LIST_INSERT_HEAD(&lba_map->request_list, request, list_entry);
+	} else {
+		cb_fn(NULL, cb_ctx, 0);
+		ftl_lba_map_request_free(band->dev, request);
+	}
+
+	return rc;
 }
 
 int
@@ -916,6 +1058,7 @@ ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx)
 	return ftl_band_read_md(band,
				ftl_head_md_num_lbks(band->dev),
				ftl_band_head_md_ppa(band),
+				band->lba_map.dma_buf,
				ftl_read_md_cb,
				ftl_unpack_head_md,
				cb_fn,
@@ -1033,3 +1176,11 @@ ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
 
 	return result;
 }
+
+size_t
+ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev)
+{
+	/* lba map pool element has size capable to store lba map + segments map */
+	return ftl_lba_map_num_lbks(dev) * FTL_BLOCK_SIZE +
+	       spdk_divide_round_up(ftl_num_band_lbks(dev), FTL_NUM_LBA_IN_BLOCK);
+}
@@ -40,8 +40,13 @@
 
 #include "ftl_io.h"
 #include "ftl_ppa.h"
+#include "ftl_io.h"
+
+/* Number of LBAs that could be stored in a single block */
+#define FTL_NUM_LBA_IN_BLOCK (FTL_BLOCK_SIZE / sizeof(uint64_t))
 
 struct spdk_ftl_dev;
+struct ftl_lba_map_request;
 
 enum ftl_chunk_state {
	FTL_CHUNK_STATE_FREE,
@@ -84,6 +89,12 @@ enum ftl_md_status {
	FTL_MD_INVALID_SIZE
 };
 
+enum ftl_lba_map_seg_state {
+	FTL_LBA_MAP_SEG_CLEAR,
+	FTL_LBA_MAP_SEG_PENDING,
+	FTL_LBA_MAP_SEG_CACHED
+};
+
 struct ftl_lba_map {
	/* LBA/vld map lock */
	pthread_spinlock_t lock;
@@ -100,6 +111,11 @@ struct ftl_lba_map {
	/* LBA map (only valid for open/relocating bands) */
	uint64_t *map;
 
+	/* LBA map segment state map (clear, pending, cached) */
+	uint8_t *segments;
+
+	LIST_HEAD(, ftl_lba_map_request) request_list;
+
	/* Metadata DMA buffer (only valid for open/relocating bands) */
	void *dma_buf;
 };
@@ -115,6 +131,22 @@ enum ftl_band_state {
	FTL_BAND_STATE_MAX
 };
 
+struct ftl_lba_map_request {
+	/* Completion callback */
+	ftl_io_fn cb;
+
+	/* Completion callback context */
+	void *cb_ctx;
+
+	/* Bit array of requested segments */
+	struct spdk_bit_array *segments;
+
+	/* Number of pending segments to read */
+	size_t num_pending;
+
+	LIST_ENTRY(ftl_lba_map_request) list_entry;
+};
+
 struct ftl_band {
	/* Device this band belongs to */
	struct spdk_ftl_dev *dev;
@@ -196,6 +228,7 @@ int ftl_band_erase(struct ftl_band *band);
 int ftl_band_write_prep(struct ftl_band *band);
 struct ftl_chunk *ftl_band_next_operational_chunk(struct ftl_band *band,
		struct ftl_chunk *chunk);
+size_t ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev);
 
 static inline int
 ftl_band_empty(const struct ftl_band *band)
@@ -155,6 +155,9 @@ struct spdk_ftl_dev {
	/* LBA map memory pool */
	struct spdk_mempool *lba_pool;
 
+	/* LBA map requests pool */
+	struct spdk_mempool *lba_request_pool;
+
	/* Statistics */
	struct ftl_stats stats;
 
@@ -34,6 +34,7 @@
 #include "spdk_internal/log.h"
 #include "spdk/ftl.h"
 #include "ftl_debug.h"
+#include "ftl_band.h"
 
 #if defined(DEBUG)
 #if defined(FTL_META_DEBUG)
@@ -55,7 +56,7 @@ ftl_band_validate_md(struct ftl_band *band)
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_lba_map *lba_map = &band->lba_map;
	struct ftl_ppa ppa_md, ppa_l2p;
-	size_t i, size;
+	size_t i, size, seg_off;
	bool valid = true;
 
	size = ftl_num_band_lbks(dev);
@@ -66,6 +67,11 @@ ftl_band_validate_md(struct ftl_band *band)
			continue;
		}
 
+		seg_off = i / FTL_NUM_LBA_IN_BLOCK;
+		if (lba_map->segments[seg_off] != FTL_LBA_MAP_SEG_CACHED) {
+			continue;
+		}
+
		ppa_md = ftl_band_ppa_from_lbkoff(band, i);
		ppa_l2p = ftl_l2p_get(dev, lba_map->map[i]);
 
@@ -512,16 +512,23 @@ spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
	*conf = g_default_conf;
 }
 
+static void
+ftl_lba_map_request_ctor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
+{
+	struct ftl_lba_map_request *request = obj;
+	struct spdk_ftl_dev *dev = opaque;
+
+	request->segments = spdk_bit_array_create(spdk_divide_round_up(
+				    ftl_num_band_lbks(dev), FTL_NUM_LBA_IN_BLOCK));
+}
+
 static int
-ftl_init_wptr_list(struct spdk_ftl_dev *dev)
+ftl_init_lba_map_pools(struct spdk_ftl_dev *dev)
 {
 #define POOL_NAME_LEN 128
	char pool_name[POOL_NAME_LEN];
	int rc;
 
-	LIST_INIT(&dev->wptr_list);
-	LIST_INIT(&dev->flush_list);
-
	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lba-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
@@ -533,16 +540,39 @@ ftl_init_wptr_list(struct spdk_ftl_dev *dev)
	 * unnecessary overhead and should be replaced by different data structure.
	 */
	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
-					    ftl_lba_map_num_lbks(dev) * FTL_BLOCK_SIZE,
+					    ftl_lba_map_pool_elem_size(dev),
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!dev->lba_pool) {
		return -ENOMEM;
	}
 
+	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lbareq-pool");
+	if (rc < 0 || rc >= POOL_NAME_LEN) {
+		return -ENAMETOOLONG;
+	}
+
+	dev->lba_request_pool = spdk_mempool_create_ctor(pool_name,
+				dev->conf.max_reloc_qdepth * dev->conf.max_active_relocs,
+				sizeof(struct ftl_lba_map_request),
+				SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+				SPDK_ENV_SOCKET_ID_ANY,
+				ftl_lba_map_request_ctor,
+				dev);
+	if (!dev->lba_request_pool) {
+		return -ENOMEM;
+	}
+
	return 0;
 }
 
+static void
+ftl_init_wptr_list(struct spdk_ftl_dev *dev)
+{
+	LIST_INIT(&dev->wptr_list);
+	LIST_INIT(&dev->flush_list);
+}
+
 static size_t
 ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
 {
@@ -952,11 +982,13 @@ spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn c
		goto fail_sync;
	}
 
-	if (ftl_init_wptr_list(dev)) {
-		SPDK_ERRLOG("Unable to init wptr\n");
+	if (ftl_init_lba_map_pools(dev)) {
+		SPDK_ERRLOG("Unable to init LBA map pools\n");
		goto fail_sync;
	}
 
+	ftl_init_wptr_list(dev);
+
	if (ftl_dev_init_bands(dev)) {
		SPDK_ERRLOG("Unable to initialize band array\n");
		goto fail_sync;
@@ -1016,6 +1048,14 @@ _ftl_halt_defrag(void *arg)
	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
 }
 
+static void
+ftl_lba_map_request_dtor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
+{
+	struct ftl_lba_map_request *request = obj;
+
+	spdk_bit_array_free(&request->segments);
+}
+
 static void
 ftl_dev_free_sync(struct spdk_ftl_dev *dev)
 {
@@ -1053,6 +1093,10 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
	}
 
	spdk_mempool_free(dev->lba_pool);
+	if (dev->lba_request_pool) {
+		spdk_mempool_obj_iter(dev->lba_request_pool, ftl_lba_map_request_dtor, NULL);
+	}
+	spdk_mempool_free(dev->lba_request_pool);
 
	ftl_rwb_free(dev->rwb);
	ftl_reloc_free(dev->reloc);
@@ -181,7 +181,8 @@ ftl_reloc_read_lba_map(struct ftl_band_reloc *breloc)
		assert(false);
	}
 
-	return ftl_band_read_lba_map(band, 0, ftl_num_band_lbks(dev), ftl_reloc_read_lba_map_cb, breloc);
+	return ftl_band_read_lba_map(band, 0, ftl_num_band_lbks(dev),
+				     ftl_reloc_read_lba_map_cb, breloc);
 }
 
 static void
@@ -132,6 +132,7 @@ cleanup_wptr_test(struct spdk_ftl_dev *dev)
	size_t i;
 
	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
+		dev->bands[i].lba_map.segments = NULL;
		test_free_ftl_band(&dev->bands[i]);
	}
 
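To make the control flow above easier to follow, here is a deliberately simplified, self-contained model of the bookkeeping added in ftl_band_read_lba_map() and ftl_read_lba_map_cb(). It uses plain C with made-up names and fixed-size arrays instead of SPDK's spdk_bit_array and mempool, so it is only a sketch of the idea: the first reader of a clear segment issues the metadata read and marks it pending, later readers of the same segment only record interest, and the completion path retires every queued request once all of its segments are cached.

/* Simplified model of the pending-request bookkeeping; illustration only, not SPDK code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_SEGMENTS 8

enum seg_state { SEG_CLEAR, SEG_PENDING, SEG_CACHED };

struct read_request {
        const char *name;
        bool wanted[NUM_SEGMENTS]; /* stands in for the per-request spdk_bit_array */
        size_t num_pending;
};

/* Register interest in segments [offset, offset + cnt); only clear segments need a new read. */
static void
submit_request(enum seg_state *segments, struct read_request *req, size_t offset, size_t cnt)
{
        for (size_t i = offset; i < offset + cnt; ++i) {
                if (segments[i] == SEG_CACHED) {
                        continue; /* already in memory, nothing to wait for */
                }
                req->wanted[i] = true;
                req->num_pending++;
                if (segments[i] == SEG_CLEAR) {
                        segments[i] = SEG_PENDING; /* only the first reader issues the I/O */
                }
        }
        if (req->num_pending == 0) {
                printf("%s completed immediately\n", req->name);
        }
}

/* Completion of a read of [offset, offset + cnt): cache the segments, retire finished requests. */
static void
complete_read(enum seg_state *segments, struct read_request *reqs, size_t nreqs,
              size_t offset, size_t cnt)
{
        for (size_t i = offset; i < offset + cnt; ++i) {
                segments[i] = SEG_CACHED;
        }

        for (size_t r = 0; r < nreqs; ++r) {
                size_t done = 0;

                for (size_t i = offset; i < offset + cnt; ++i) {
                        if (reqs[r].wanted[i]) {
                                reqs[r].wanted[i] = false;
                                done++;
                        }
                }
                reqs[r].num_pending -= done;
                if (done > 0 && reqs[r].num_pending == 0) {
                        printf("%s completed\n", reqs[r].name);
                }
        }
}

int
main(void)
{
        enum seg_state segments[NUM_SEGMENTS] = { SEG_CLEAR };
        struct read_request reqs[2] = { { .name = "request A" }, { .name = "request B" } };

        submit_request(segments, &reqs[0], 0, 4); /* issues a read for segments 0-3 */
        submit_request(segments, &reqs[1], 2, 2); /* overlaps 2-3: waits, no second read */
        complete_read(segments, reqs, 2, 0, 4);   /* prints that both requests completed */
        return 0;
}

In the actual patch, the per-request segment set is an spdk_bit_array sized to the number of segments in a band, and waiting requests are kept on a per-band LIST that the read completion callback walks.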