lib/ftl: Change ftl_chunk structure to ftl_zone
This is the starting point for moving the current FTL implementation, which runs on top of the Open Channel NVMe driver, to the abstracted zoned bdev. This patch renames the ftl_chunk structure to ftl_zone and starts using the zone states from the zoned bdev interface.

Change-Id: I5429f489cc08a1ac27f09aba3dca4b40ea95eeb3
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/467391
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Parent: 2938dc14b0
Commit: 77d591e172
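For quick reference, the chunk-to-zone state translation applied throughout this diff can be summarized in one helper. This is a hedged sketch only: chunk_state_to_zone_state() is a hypothetical consolidation of the mapping visible in the ftl_get_zone_state() hunk below, not code from the patch. FTL_CHUNK_STATE_VACANT has no direct zoned-bdev counterpart; the patch simply stops using it (here it falls into the default arm).

    #include "spdk/bdev_zone.h"

    /* Sketch only: the mapping this patch applies when replacing the removed
     * enum ftl_chunk_state with enum spdk_bdev_zone_state. */
    static enum spdk_bdev_zone_state
    chunk_state_to_zone_state(enum ftl_chunk_state state)
    {
        switch (state) {
        case FTL_CHUNK_STATE_FREE:   return SPDK_BDEV_ZONE_STATE_EMPTY;
        case FTL_CHUNK_STATE_OPEN:   return SPDK_BDEV_ZONE_STATE_OPEN;
        case FTL_CHUNK_STATE_CLOSED: return SPDK_BDEV_ZONE_STATE_CLOSED;
        case FTL_CHUNK_STATE_BAD:    /* fallthrough */
        default:                     return SPDK_BDEV_ZONE_STATE_OFFLINE;
        }
    }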
@@ -150,10 +150,10 @@ struct spdk_ftl_attrs {
 	size_t lbk_size;
 	/* Write buffer cache */
 	struct spdk_bdev_desc *cache_bdev_desc;
-	/* Number of chunks per parallel unit in the underlying device (including any offline ones) */
-	size_t num_chunks;
-	/* Number of sectors per chunk */
-	size_t chunk_size;
+	/* Number of zones per parallel unit in the underlying device (including any offline ones) */
+	size_t num_zones;
+	/* Number of logical blocks per zone */
+	size_t zone_size;
 	/* Device specific configuration */
 	struct spdk_ftl_conf conf;
 };

@@ -137,7 +137,7 @@ ftl_anm_event_alloc(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
 		break;
 	case FTL_ANM_RANGE_CHK:
 	case FTL_ANM_RANGE_PU:
-		event->num_lbks = ftl_dev_lbks_in_chunk(dev);
+		event->num_lbks = ftl_dev_lbks_in_zone(dev);
 		break;
 	default:
 		assert(false);

@@ -160,9 +160,9 @@ ftl_anm_process_log(struct ftl_anm_poller *poller,
 	num_bands = range != FTL_ANM_RANGE_PU ? 1 : ftl_dev_num_bands(dev);
 	for (i = 0; i < num_bands; ++i) {
-		struct ftl_chunk *chk = ftl_band_chunk_from_ppa(&dev->bands[i], ppa);
+		struct ftl_zone *zone = ftl_band_zone_from_ppa(&dev->bands[i], ppa);
-		if (chk->state == FTL_CHUNK_STATE_BAD) {
+		if (zone->state == SPDK_BDEV_ZONE_STATE_OFFLINE) {
 			continue;
 		}

@@ -156,7 +156,7 @@ ftl_band_free_lba_map(struct ftl_band *band)
 	assert(!band->high_prio);
 	/* Verify that band's metadata is consistent with l2p */
-	if (band->num_chunks) {
+	if (band->num_zones) {
 		assert(ftl_band_validate_md(band) == true);
 	}

@@ -224,7 +224,7 @@ static void
 _ftl_band_set_closed(struct ftl_band *band)
 {
 	struct spdk_ftl_dev *dev = band->dev;
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	/* Set the state as free_md() checks for that */
 	band->state = FTL_BAND_STATE_CLOSED;

@@ -232,10 +232,10 @@ _ftl_band_set_closed(struct ftl_band *band)
 	/* Free the lba map if there are no outstanding IOs */
 	ftl_band_release_lba_map(band);
-	if (spdk_likely(band->num_chunks)) {
+	if (spdk_likely(band->num_zones)) {
 		LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
-		CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
-			chunk->state = FTL_CHUNK_STATE_CLOSED;
+		CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
+			zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
 		}
 	} else {
 		LIST_REMOVE(band, list_entry);

@@ -335,7 +335,7 @@ ftl_unpack_tail_md(struct ftl_band *band)
 	/*
 	 * When restoring from a dirty shutdown it's possible old tail meta wasn't yet cleared -
-	 * band had saved head meta, but didn't manage to send erase to all chunks.
+	 * band had saved head meta, but didn't manage to send erase to all zones.
 	 * The already found tail md header is valid, but inconsistent with the head meta. Treat
 	 * such a band as open/without valid tail md.
 	 */

@@ -386,28 +386,28 @@ struct ftl_ppa
 ftl_band_tail_md_ppa(struct ftl_band *band)
 {
 	struct ftl_ppa ppa = {};
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	struct spdk_ftl_dev *dev = band->dev;
 	size_t xfer_size = dev->xfer_size;
 	size_t num_req = ftl_band_tail_md_offset(band) / xfer_size;
 	size_t i;
-	if (spdk_unlikely(!band->num_chunks)) {
+	if (spdk_unlikely(!band->num_zones)) {
 		return ftl_to_ppa(FTL_PPA_INVALID);
 	}
 	/* Metadata should be aligned to xfer size */
 	assert(ftl_band_tail_md_offset(band) % xfer_size == 0);
-	chunk = CIRCLEQ_FIRST(&band->chunks);
-	for (i = 0; i < num_req % band->num_chunks; ++i) {
-		chunk = ftl_band_next_chunk(band, chunk);
+	zone = CIRCLEQ_FIRST(&band->zones);
+	for (i = 0; i < num_req % band->num_zones; ++i) {
+		zone = ftl_band_next_zone(band, zone);
 	}
-	ppa.lbk = (num_req / band->num_chunks) * xfer_size;
+	ppa.lbk = (num_req / band->num_zones) * xfer_size;
 	ppa.chk = band->id;
-	ppa.pu = chunk->punit->start_ppa.pu;
-	ppa.grp = chunk->punit->start_ppa.grp;
+	ppa.pu = zone->punit->start_ppa.pu;
+	ppa.grp = zone->punit->start_ppa.grp;
 	return ppa;
 }

@@ -417,11 +417,11 @@ ftl_band_head_md_ppa(struct ftl_band *band)
 {
 	struct ftl_ppa ppa;
-	if (spdk_unlikely(!band->num_chunks)) {
+	if (spdk_unlikely(!band->num_zones)) {
 		return ftl_to_ppa(FTL_PPA_INVALID);
 	}
-	ppa = CIRCLEQ_FIRST(&band->chunks)->punit->start_ppa;
+	ppa = CIRCLEQ_FIRST(&band->zones)->punit->start_ppa;
 	ppa.chk = band->id;
 	return ppa;

@@ -482,7 +482,7 @@ ftl_band_age(const struct ftl_band *band)
 size_t
 ftl_band_num_usable_lbks(const struct ftl_band *band)
 {
-	return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev);
+	return band->num_zones * ftl_dev_lbks_in_zone(band->dev);
 }
 size_t

@@ -516,8 +516,8 @@ ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
 	return &dev->bands[ppa.chk];
 }
-struct ftl_chunk *
-ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
+struct ftl_zone *
+ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
 {
 	struct spdk_ftl_dev *dev = band->dev;
 	unsigned int punit;

@@ -525,7 +525,7 @@ ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
 	punit = ftl_ppa_flatten_punit(dev, ppa);
 	assert(punit < ftl_dev_num_punits(dev));
-	return &band->chunk_buf[punit];
+	return &band->zone_buf[punit];
 }
 uint64_t

@@ -537,67 +537,67 @@ ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
 	punit = ftl_ppa_flatten_punit(dev, ppa);
 	assert(ppa.chk == band->id);
-	return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
+	return punit * ftl_dev_lbks_in_zone(dev) + ppa.lbk;
 }
 struct ftl_ppa
 ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
 {
 	struct spdk_ftl_dev *dev = band->dev;
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	unsigned int punit_num;
 	size_t num_xfers, num_stripes;
 	assert(ppa.chk == band->id);
 	punit_num = ftl_ppa_flatten_punit(dev, ppa);
-	chunk = &band->chunk_buf[punit_num];
+	zone = &band->zone_buf[punit_num];
 	num_lbks += (ppa.lbk % dev->xfer_size);
 	ppa.lbk -= (ppa.lbk % dev->xfer_size);
 #if defined(DEBUG)
-	/* Check that the number of chunks has not been changed */
-	struct ftl_chunk *_chunk;
-	size_t _num_chunks = 0;
-	CIRCLEQ_FOREACH(_chunk, &band->chunks, circleq) {
-		if (spdk_likely(_chunk->state != FTL_CHUNK_STATE_BAD)) {
-			_num_chunks++;
+	/* Check that the number of zones has not been changed */
+	struct ftl_zone *_zone;
+	size_t _num_zones = 0;
+	CIRCLEQ_FOREACH(_zone, &band->zones, circleq) {
+		if (spdk_likely(_zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE)) {
+			_num_zones++;
 		}
 	}
-	assert(band->num_chunks == _num_chunks);
+	assert(band->num_zones == _num_zones);
 #endif
-	assert(band->num_chunks != 0);
-	num_stripes = (num_lbks / dev->xfer_size) / band->num_chunks;
+	assert(band->num_zones != 0);
+	num_stripes = (num_lbks / dev->xfer_size) / band->num_zones;
 	ppa.lbk += num_stripes * dev->xfer_size;
-	num_lbks -= num_stripes * dev->xfer_size * band->num_chunks;
+	num_lbks -= num_stripes * dev->xfer_size * band->num_zones;
-	if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
+	if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
 		return ftl_to_ppa(FTL_PPA_INVALID);
 	}
 	num_xfers = num_lbks / dev->xfer_size;
 	for (size_t i = 0; i < num_xfers; ++i) {
-		/* When the last chunk is reached the lbk part of the address */
+		/* When the last zone is reached the lbk part of the address */
 		/* needs to be increased by xfer_size */
-		if (ftl_band_chunk_is_last(band, chunk)) {
+		if (ftl_band_zone_is_last(band, zone)) {
 			ppa.lbk += dev->xfer_size;
-			if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
+			if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
 				return ftl_to_ppa(FTL_PPA_INVALID);
 			}
 		}
-		chunk = ftl_band_next_operational_chunk(band, chunk);
-		assert(chunk);
-		ppa.grp = chunk->start_ppa.grp;
-		ppa.pu = chunk->start_ppa.pu;
+		zone = ftl_band_next_operational_zone(band, zone);
+		assert(zone);
+		ppa.grp = zone->start_ppa.grp;
+		ppa.pu = zone->start_ppa.pu;
 		num_lbks -= dev->xfer_size;
 	}
 	if (num_lbks) {
 		ppa.lbk += num_lbks;
-		if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
+		if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
 			return ftl_to_ppa(FTL_PPA_INVALID);
 		}
 	}

@@ -608,18 +608,18 @@ ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbk
 static size_t
 ftl_xfer_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
 {
-	struct ftl_chunk *chunk, *current_chunk;
+	struct ftl_zone *zone, *current_zone;
 	unsigned int punit_offset = 0;
 	size_t off, num_stripes, xfer_size = band->dev->xfer_size;
 	assert(ppa.chk == band->id);
-	num_stripes = (ppa.lbk / xfer_size) * band->num_chunks;
+	num_stripes = (ppa.lbk / xfer_size) * band->num_zones;
 	off = ppa.lbk % xfer_size;
-	current_chunk = ftl_band_chunk_from_ppa(band, ppa);
-	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
-		if (current_chunk == chunk) {
+	current_zone = ftl_band_zone_from_ppa(band, ppa);
+	CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
+		if (current_zone == zone) {
 			break;
 		}
 		punit_offset++;

@@ -635,9 +635,9 @@ ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
 	struct spdk_ftl_dev *dev = band->dev;
 	uint64_t punit;
-	punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin;
+	punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin;
-	ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev);
+	ppa.lbk = lbkoff % ftl_dev_lbks_in_zone(dev);
 	ppa.chk = band->id;
 	ppa.pu = punit / dev->geo.num_grp;
 	ppa.grp = punit % dev->geo.num_grp;

@@ -823,7 +823,7 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa
 	struct spdk_ftl_dev *dev = band->dev;
 	struct ftl_md_io *io;
-	if (spdk_unlikely(!band->num_chunks)) {
+	if (spdk_unlikely(!band->num_zones)) {
 		return -ENOENT;
 	}

@@ -1036,46 +1036,46 @@ ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx)
 }
 static void
-ftl_band_remove_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
+ftl_band_remove_zone(struct ftl_band *band, struct ftl_zone *zone)
 {
-	CIRCLEQ_REMOVE(&band->chunks, chunk, circleq);
-	band->num_chunks--;
+	CIRCLEQ_REMOVE(&band->zones, zone, circleq);
+	band->num_zones--;
 }
 static void
 ftl_erase_fail(struct ftl_io *io, int status)
 {
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	struct ftl_band *band = io->band;
 	char buf[128];
 	SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
 		    ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);
-	chunk = ftl_band_chunk_from_ppa(band, io->ppa);
-	chunk->state = FTL_CHUNK_STATE_BAD;
-	ftl_band_remove_chunk(band, chunk);
+	zone = ftl_band_zone_from_ppa(band, io->ppa);
+	zone->state = SPDK_BDEV_ZONE_STATE_OFFLINE;
+	ftl_band_remove_zone(band, zone);
 	band->tail_md_ppa = ftl_band_tail_md_ppa(band);
 }
 static void
 ftl_band_erase_cb(struct ftl_io *io, void *ctx, int status)
 {
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	if (spdk_unlikely(status)) {
 		ftl_erase_fail(io, status);
 		return;
 	}
-	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
-	chunk->state = FTL_CHUNK_STATE_FREE;
-	chunk->write_offset = 0;
+	zone = ftl_band_zone_from_ppa(io->band, io->ppa);
+	zone->state = SPDK_BDEV_ZONE_STATE_EMPTY;
+	zone->write_offset = 0;
 }
 int
 ftl_band_erase(struct ftl_band *band)
 {
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	struct ftl_io *io;
 	int rc = 0;

@@ -1084,8 +1084,8 @@ ftl_band_erase(struct ftl_band *band)
 	ftl_band_set_state(band, FTL_BAND_STATE_PREP);
-	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
-		if (chunk->state == FTL_CHUNK_STATE_FREE) {
+	CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
+		if (zone->state == SPDK_BDEV_ZONE_STATE_EMPTY) {
 			continue;
 		}

@@ -1095,7 +1095,7 @@ ftl_band_erase(struct ftl_band *band)
 			break;
 		}
-		io->ppa = chunk->start_ppa;
+		io->ppa = zone->start_ppa;
 		rc = ftl_io_erase(io);
 		if (rc) {
 			assert(0);

@@ -1120,27 +1120,27 @@ ftl_band_write_prep(struct ftl_band *band)
 	return 0;
 }
-struct ftl_chunk *
-ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
+struct ftl_zone *
+ftl_band_next_operational_zone(struct ftl_band *band, struct ftl_zone *zone)
 {
-	struct ftl_chunk *result = NULL;
-	struct ftl_chunk *entry;
+	struct ftl_zone *result = NULL;
+	struct ftl_zone *entry;
-	if (spdk_unlikely(!band->num_chunks)) {
+	if (spdk_unlikely(!band->num_zones)) {
 		return NULL;
 	}
 	/* Erasing band may fail after it was assigned to wptr. */
-	/* In such a case chunk is no longer in band->chunks queue. */
-	if (spdk_likely(chunk->state != FTL_CHUNK_STATE_BAD)) {
-		result = ftl_band_next_chunk(band, chunk);
+	/* In such a case zone is no longer in band->zones queue. */
+	if (spdk_likely(zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE)) {
+		result = ftl_band_next_zone(band, zone);
 	} else {
-		CIRCLEQ_FOREACH_REVERSE(entry, &band->chunks, circleq) {
-			if (entry->pos > chunk->pos) {
+		CIRCLEQ_FOREACH_REVERSE(entry, &band->zones, circleq) {
+			if (entry->pos > zone->pos) {
 				result = entry;
 			} else {
 				if (!result) {
-					result = CIRCLEQ_FIRST(&band->chunks);
+					result = CIRCLEQ_FIRST(&band->zones);
 				}
 				break;
 			}

@@ -37,6 +37,7 @@
 #include "spdk/stdinc.h"
 #include "spdk/bit_array.h"
 #include "spdk/queue.h"
+#include "spdk/bdev_zone.h"
 #include "ftl_io.h"
 #include "ftl_ppa.h"

@@ -48,17 +49,9 @@
 struct spdk_ftl_dev;
 struct ftl_lba_map_request;
-enum ftl_chunk_state {
-	FTL_CHUNK_STATE_FREE,
-	FTL_CHUNK_STATE_OPEN,
-	FTL_CHUNK_STATE_CLOSED,
-	FTL_CHUNK_STATE_BAD,
-	FTL_CHUNK_STATE_VACANT,
-};
-struct ftl_chunk {
-	/* Block state */
-	enum ftl_chunk_state state;
+struct ftl_zone {
+	/* Zone state */
+	enum spdk_bdev_zone_state state;
 	/* Indicates that there is inflight write */
 	bool busy;

@@ -72,10 +65,10 @@ struct ftl_chunk {
 	/* Pointer to parallel unit */
 	struct ftl_punit *punit;
-	/* Position in band's chunk_buf */
+	/* Position in band's zone_buf */
 	uint32_t pos;
-	CIRCLEQ_ENTRY(ftl_chunk) circleq;
+	CIRCLEQ_ENTRY(ftl_zone) circleq;
 };
 enum ftl_md_status {

@@ -154,14 +147,14 @@ struct ftl_band {
 	/* Device this band belongs to */
 	struct spdk_ftl_dev *dev;
-	/* Number of operational chunks */
-	size_t num_chunks;
+	/* Number of operational zones */
+	size_t num_zones;
-	/* Array of chunks */
-	struct ftl_chunk *chunk_buf;
+	/* Array of zones */
+	struct ftl_zone *zone_buf;
-	/* List of operational chunks */
-	CIRCLEQ_HEAD(, ftl_chunk) chunks;
+	/* List of operational zones */
+	CIRCLEQ_HEAD(, ftl_zone) zones;
 	/* LBA map */
 	struct ftl_lba_map lba_map;

@@ -223,7 +216,7 @@ size_t ftl_band_user_lbks(const struct ftl_band *band);
 void ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
 		       struct ftl_ppa ppa);
 struct ftl_band *ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
-struct ftl_chunk *ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa);
+struct ftl_zone *ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa);
 void ftl_band_md_clear(struct ftl_band *band);
 int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa,
 			  ftl_io_fn cb_fn, void *cb_ctx);

@@ -236,8 +229,8 @@ void ftl_band_write_failed(struct ftl_band *band);
 int ftl_band_full(struct ftl_band *band, size_t offset);
 int ftl_band_erase(struct ftl_band *band);
 int ftl_band_write_prep(struct ftl_band *band);
-struct ftl_chunk *ftl_band_next_operational_chunk(struct ftl_band *band,
-		struct ftl_chunk *chunk);
+struct ftl_zone *ftl_band_next_operational_zone(struct ftl_band *band,
+		struct ftl_zone *zone);
 size_t ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev);
 static inline int

@@ -246,11 +239,11 @@ ftl_band_empty(const struct ftl_band *band)
 	return band->lba_map.num_vld == 0;
 }
-static inline struct ftl_chunk *
-ftl_band_next_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
+static inline struct ftl_zone *
+ftl_band_next_zone(struct ftl_band *band, struct ftl_zone *zone)
 {
-	assert(chunk->state != FTL_CHUNK_STATE_BAD);
-	return CIRCLEQ_LOOP_NEXT(&band->chunks, chunk, circleq);
+	assert(zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE);
+	return CIRCLEQ_LOOP_NEXT(&band->zones, zone, circleq);
 }
 static inline void

@@ -282,23 +275,23 @@ ftl_band_lbkoff_valid(struct ftl_band *band, size_t lbkoff)
 }
 static inline int
-ftl_band_chunk_is_last(struct ftl_band *band, struct ftl_chunk *chunk)
+ftl_band_zone_is_last(struct ftl_band *band, struct ftl_zone *zone)
 {
-	return chunk == CIRCLEQ_LAST(&band->chunks);
+	return zone == CIRCLEQ_LAST(&band->zones);
 }
 static inline int
-ftl_band_chunk_is_first(struct ftl_band *band, struct ftl_chunk *chunk)
+ftl_band_zone_is_first(struct ftl_band *band, struct ftl_zone *zone)
 {
-	return chunk == CIRCLEQ_FIRST(&band->chunks);
+	return zone == CIRCLEQ_FIRST(&band->zones);
 }
 static inline int
-ftl_chunk_is_writable(const struct ftl_chunk *chunk)
+ftl_zone_is_writable(const struct ftl_zone *zone)
 {
-	return (chunk->state == FTL_CHUNK_STATE_OPEN ||
-		chunk->state == FTL_CHUNK_STATE_FREE) &&
-	       !chunk->busy;
+	return (zone->state == SPDK_BDEV_ZONE_STATE_OPEN ||
+		zone->state == SPDK_BDEV_ZONE_STATE_EMPTY) &&
+	       !zone->busy;
 }
 #endif /* FTL_BAND_H */

@@ -74,8 +74,8 @@ struct ftl_wptr {
 	/* Current logical block's offset */
 	uint64_t offset;
-	/* Current erase block */
-	struct ftl_chunk *chunk;
+	/* Current zone */
+	struct ftl_zone *zone;
 	/* Pending IO queue */
 	TAILQ_HEAD(, ftl_io) pending_queue;

@@ -295,7 +295,7 @@ ftl_ppa_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa)
 	assert(!ftl_ppa_invalid(*ppa));
 	/* Metadata has to be read in the way it's written (jumping across */
-	/* the chunks in xfer_size increments) */
+	/* the zones in xfer_size increments) */
 	if (io->flags & FTL_IO_MD) {
 		max_lbks = dev->xfer_size - (ppa->lbk % dev->xfer_size);
 		lbk_cnt = spdk_min(ftl_io_iovec_len_left(io), max_lbks);

@@ -322,7 +322,7 @@ ftl_wptr_open_band(struct ftl_wptr *wptr)
 {
 	struct ftl_band *band = wptr->band;
-	assert(ftl_band_chunk_is_first(band, wptr->chunk));
+	assert(ftl_band_zone_is_first(band, wptr->zone));
 	assert(band->lba_map.num_vld == 0);
 	ftl_band_clear_lba_map(band);

@@ -339,17 +339,16 @@ ftl_submit_erase(struct ftl_io *io)
 	struct spdk_ftl_dev *dev = io->dev;
 	struct ftl_band *band = io->band;
 	struct ftl_ppa ppa = io->ppa;
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	uint64_t ppa_packed;
 	int rc = 0;
 	size_t i;
 	for (i = 0; i < io->lbk_cnt; ++i) {
 		if (i != 0) {
-			chunk = ftl_band_next_chunk(band, ftl_band_chunk_from_ppa(band, ppa));
-			assert(chunk->state == FTL_CHUNK_STATE_CLOSED ||
-			       chunk->state == FTL_CHUNK_STATE_VACANT);
-			ppa = chunk->start_ppa;
+			zone = ftl_band_next_zone(band, ftl_band_zone_from_ppa(band, ppa));
+			assert(zone->state == SPDK_BDEV_ZONE_STATE_CLOSED);
+			ppa = zone->start_ppa;
 		}
 		assert(ppa.lbk == 0);

@@ -474,8 +473,8 @@ ftl_wptr_init(struct ftl_band *band)
 	wptr->dev = dev;
 	wptr->band = band;
-	wptr->chunk = CIRCLEQ_FIRST(&band->chunks);
-	wptr->ppa = wptr->chunk->start_ppa;
+	wptr->zone = CIRCLEQ_FIRST(&band->zones);
+	wptr->ppa = wptr->zone->start_ppa;
 	TAILQ_INIT(&wptr->pending_queue);
 	return wptr;

@@ -580,13 +579,13 @@ ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
 		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
 	}
-	wptr->chunk->busy = true;
+	wptr->zone->busy = true;
 	wptr->ppa = ftl_band_next_xfer_ppa(band, wptr->ppa, xfer_size);
-	wptr->chunk = ftl_band_next_operational_chunk(band, wptr->chunk);
+	wptr->zone = ftl_band_next_operational_zone(band, wptr->zone);
 	assert(!ftl_ppa_invalid(wptr->ppa));
-	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: grp:%d, pu:%d chunk:%d, lbk:%u\n",
+	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: grp:%d, pu:%d zone:%d, lbk:%u\n",
 		      wptr->ppa.grp, wptr->ppa.pu, wptr->ppa.chk, wptr->ppa.lbk);
 	if (wptr->offset >= next_thld && !dev->next_band) {

@@ -607,9 +606,9 @@ ftl_wptr_ready(struct ftl_wptr *wptr)
 	/* TODO: add handling of empty bands */
-	if (spdk_unlikely(!ftl_chunk_is_writable(wptr->chunk))) {
+	if (spdk_unlikely(!ftl_zone_is_writable(wptr->zone))) {
 		/* Erasing band may fail after it was assigned to wptr. */
-		if (spdk_unlikely(wptr->chunk->state == FTL_CHUNK_STATE_BAD)) {
+		if (spdk_unlikely(wptr->zone->state == SPDK_BDEV_ZONE_STATE_OFFLINE)) {
 			ftl_wptr_advance(wptr, wptr->dev->xfer_size);
 		}
 		return 0;

@@ -1477,14 +1476,14 @@ ftl_io_init_child_write(struct ftl_io *parent, struct ftl_ppa ppa,
 static void
 ftl_io_child_write_cb(struct ftl_io *io, void *ctx, int status)
 {
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	struct ftl_wptr *wptr;
-	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
+	zone = ftl_band_zone_from_ppa(io->band, io->ppa);
 	wptr = ftl_wptr_from_band(io->band);
-	chunk->busy = false;
-	chunk->write_offset += io->lbk_cnt;
+	zone->busy = false;
+	zone->write_offset += io->lbk_cnt;
 	/* If some other write on the same band failed the write pointer would already be freed */
 	if (spdk_likely(wptr)) {

@@ -1508,7 +1507,7 @@ ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
 		ppa = io->ppa;
 	}
-	/* Split IO to child requests and release chunk immediately after child is completed */
+	/* Split IO to child requests and release zone immediately after child is completed */
 	child = ftl_io_init_child_write(io, ppa, ftl_io_iovec_addr(io),
 					ftl_io_get_md(io), ftl_io_child_write_cb);
 	if (!child) {

@@ -1545,8 +1544,8 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
 	while (io->iov_pos < io->iov_cnt) {
 		/* There are no guarantees of the order of completion of NVMe IO submission queue */
-		/* so wait until chunk is not busy before submitting another write */
-		if (wptr->chunk->busy) {
+		/* so wait until zone is not busy before submitting another write */
+		if (wptr->zone->busy) {
 			TAILQ_INSERT_TAIL(&wptr->pending_queue, io, retry_entry);
 			rc = -EAGAIN;
 			break;

@@ -1888,8 +1887,8 @@ spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *at
 	attrs->lbk_size = FTL_BLOCK_SIZE;
 	attrs->range = dev->range;
 	attrs->cache_bdev_desc = dev->nv_cache.bdev_desc;
-	attrs->num_chunks = dev->geo.num_chk;
-	attrs->chunk_size = dev->geo.clba;
+	attrs->num_zones = dev->geo.num_chk;
+	attrs->zone_size = dev->geo.clba;
 	attrs->conf = dev->conf;
 }

@@ -2150,9 +2149,9 @@ ftl_process_anm_event(struct ftl_anm_event *event)
 bool
 ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa)
 {
-	struct ftl_chunk *chunk = ftl_band_chunk_from_ppa(band, ppa);
+	struct ftl_zone *zone = ftl_band_zone_from_ppa(band, ppa);
-	return ppa.lbk < chunk->write_offset;
+	return ppa.lbk < zone->write_offset;
 }
 static void

@@ -52,7 +52,7 @@
 struct spdk_ftl_dev;
 struct ftl_band;
-struct ftl_chunk;
+struct ftl_zone;
 struct ftl_io;
 struct ftl_restore;
 struct ftl_wptr;

@@ -484,7 +484,7 @@ ftl_dev_num_bands(const struct spdk_ftl_dev *dev)
 }
 static inline size_t
-ftl_dev_lbks_in_chunk(const struct spdk_ftl_dev *dev)
+ftl_dev_lbks_in_zone(const struct spdk_ftl_dev *dev)
 {
 	return dev->geo.clba;
 }

@@ -498,7 +498,7 @@ ftl_dev_num_punits(const struct spdk_ftl_dev *dev)
 static inline uint64_t
 ftl_num_band_lbks(const struct spdk_ftl_dev *dev)
 {
-	return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_chunk(dev);
+	return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_zone(dev);
 }
 static inline size_t

@@ -107,17 +107,17 @@ ftl_dev_dump_bands(struct spdk_ftl_dev *dev)
 			continue;
 		}
-		if (!dev->bands[i].num_chunks) {
-			ftl_debug(" Band %3zu: all chunks are offline\n", i + 1);
+		if (!dev->bands[i].num_zones) {
+			ftl_debug(" Band %3zu: all zones are offline\n", i + 1);
 			continue;
 		}
 		total += dev->bands[i].lba_map.num_vld;
-		ftl_debug(" Band %3zu: %8zu / %zu \tnum_chunks: %zu \twr_cnt: %"PRIu64"\tmerit:"
+		ftl_debug(" Band %3zu: %8zu / %zu \tnum_zones: %zu \twr_cnt: %"PRIu64"\tmerit:"
 			  "%10.3f\tstate: %s\n",
 			  i + 1, dev->bands[i].lba_map.num_vld,
 			  ftl_band_user_lbks(&dev->bands[i]),
-			  dev->bands[i].num_chunks,
+			  dev->bands[i].num_zones,
 			  dev->bands[i].wr_cnt,
 			  dev->bands[i].merit,
 			  ftl_band_state_str[dev->bands[i].state]);

@@ -40,6 +40,7 @@
 #include "spdk/ftl.h"
 #include "spdk/likely.h"
 #include "spdk/string.h"
+#include "spdk/bdev_zone.h"
 #include "ftl_core.h"
 #include "ftl_anm.h"

@@ -259,26 +260,26 @@ ftl_retrieve_punit_chunk_info(struct spdk_ftl_dev *dev, const struct ftl_punit *
 }
 static unsigned char
-ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info)
+ftl_get_zone_state(const struct spdk_ocssd_chunk_information_entry *info)
 {
 	if (info->cs.free) {
-		return FTL_CHUNK_STATE_FREE;
+		return SPDK_BDEV_ZONE_STATE_EMPTY;
 	}
 	if (info->cs.open) {
-		return FTL_CHUNK_STATE_OPEN;
+		return SPDK_BDEV_ZONE_STATE_OPEN;
 	}
 	if (info->cs.closed) {
-		return FTL_CHUNK_STATE_CLOSED;
+		return SPDK_BDEV_ZONE_STATE_CLOSED;
 	}
 	if (info->cs.offline) {
-		return FTL_CHUNK_STATE_BAD;
+		return SPDK_BDEV_ZONE_STATE_OFFLINE;
 	}
 	assert(0 && "Invalid block state");
-	return FTL_CHUNK_STATE_BAD;
+	return SPDK_BDEV_ZONE_STATE_OFFLINE;
 }
 static void

@@ -289,7 +290,7 @@ ftl_remove_empty_bands(struct spdk_ftl_dev *dev)
 	/* Remove band from shut_bands list to prevent further processing */
 	/* if all blocks on this band are bad */
 	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
-		if (!band->num_chunks) {
+		if (!band->num_zones) {
 			dev->num_bands--;
 			LIST_REMOVE(band, list_entry);
 		}

@@ -302,7 +303,7 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
 	struct spdk_ocssd_chunk_information_entry *info;
 	struct ftl_band *band, *pband;
 	struct ftl_punit *punit;
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	unsigned int i, j;
 	char buf[128];
 	int rc = 0;

@@ -335,9 +336,9 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
 		}
 		pband = band;
-		CIRCLEQ_INIT(&band->chunks);
-		band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
-		if (!band->chunk_buf) {
+		CIRCLEQ_INIT(&band->zones);
+		band->zone_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->zone_buf));
+		if (!band->zone_buf) {
 			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
 			rc = -1;
 			goto out;

@@ -369,17 +370,17 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
 		for (j = 0; j < ftl_dev_num_bands(dev); ++j) {
 			band = &dev->bands[j];
-			chunk = &band->chunk_buf[i];
-			chunk->pos = i;
-			chunk->state = ftl_get_chunk_state(&info[j]);
-			chunk->punit = punit;
-			chunk->start_ppa = punit->start_ppa;
-			chunk->start_ppa.chk = band->id;
-			chunk->write_offset = ftl_dev_lbks_in_chunk(dev);
+			zone = &band->zone_buf[i];
+			zone->pos = i;
+			zone->state = ftl_get_zone_state(&info[j]);
+			zone->punit = punit;
+			zone->start_ppa = punit->start_ppa;
+			zone->start_ppa.chk = band->id;
+			zone->write_offset = ftl_dev_lbks_in_zone(dev);
-			if (chunk->state != FTL_CHUNK_STATE_BAD) {
-				band->num_chunks++;
-				CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
+			if (zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE) {
+				band->num_zones++;
+				CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
 			}
 		}
 	}

@@ -693,7 +694,7 @@ ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
 	int cnt = 0;
 	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
-		if (band->num_chunks && !band->lba_map.num_vld) {
+		if (band->num_zones && !band->lba_map.num_vld) {
 			cnt++;
 		}
 	}

@@ -1246,7 +1247,7 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
 	if (dev->bands) {
 		for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
-			free(dev->bands[i].chunk_buf);
+			free(dev->bands[i].zone_buf);
 			spdk_bit_array_free(&dev->bands[i].lba_map.vld);
 			spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
 		}

@@ -100,11 +100,11 @@ struct ftl_band_reloc {
 	/* Reloc map iterator */
 	struct {
-		/* Array of chunk offsets */
-		size_t *chk_offset;
+		/* Array of zone offsets */
+		size_t *zone_offset;
-		/* Currently chunk */
-		size_t chk_current;
+		/* Current zone */
+		size_t zone_current;
 	} iter;
 	/* Number of outstanding moves */

@@ -160,19 +160,19 @@ ftl_reloc_is_defrag_active(const struct ftl_reloc *reloc)
 }
 static size_t
-ftl_reloc_iter_chk_offset(struct ftl_band_reloc *breloc)
+ftl_reloc_iter_zone_offset(struct ftl_band_reloc *breloc)
 {
-	size_t chunk = breloc->iter.chk_current;
+	size_t zone = breloc->iter.zone_current;
-	return breloc->iter.chk_offset[chunk];
+	return breloc->iter.zone_offset[zone];
 }
 static size_t
-ftl_reloc_iter_chk_done(struct ftl_band_reloc *breloc)
+ftl_reloc_iter_zone_done(struct ftl_band_reloc *breloc)
 {
-	size_t num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev);
+	size_t num_lbks = ftl_dev_lbks_in_zone(breloc->parent->dev);
-	return ftl_reloc_iter_chk_offset(breloc) == num_lbks;
+	return ftl_reloc_iter_zone_offset(breloc) == num_lbks;
 }
 static void

@@ -298,25 +298,25 @@ ftl_reloc_read_cb(struct ftl_io *io, void *arg, int status)
 static void
 ftl_reloc_iter_reset(struct ftl_band_reloc *breloc)
 {
-	memset(breloc->iter.chk_offset, 0, ftl_dev_num_punits(breloc->band->dev) *
-	       sizeof(*breloc->iter.chk_offset));
-	breloc->iter.chk_current = 0;
+	memset(breloc->iter.zone_offset, 0, ftl_dev_num_punits(breloc->band->dev) *
+	       sizeof(*breloc->iter.zone_offset));
+	breloc->iter.zone_current = 0;
 }
 static size_t
 ftl_reloc_iter_lbkoff(struct ftl_band_reloc *breloc)
 {
-	size_t chk_offset = breloc->iter.chk_current * ftl_dev_lbks_in_chunk(breloc->parent->dev);
+	size_t zone_offset = breloc->iter.zone_current * ftl_dev_lbks_in_zone(breloc->parent->dev);
-	return breloc->iter.chk_offset[breloc->iter.chk_current] + chk_offset;
+	return breloc->iter.zone_offset[breloc->iter.zone_current] + zone_offset;
 }
 static void
-ftl_reloc_iter_next_chk(struct ftl_band_reloc *breloc)
+ftl_reloc_iter_next_zone(struct ftl_band_reloc *breloc)
 {
-	size_t num_chk = ftl_dev_num_punits(breloc->band->dev);
+	size_t num_zones = ftl_dev_num_punits(breloc->band->dev);
-	breloc->iter.chk_current = (breloc->iter.chk_current + 1) % num_chk;
+	breloc->iter.zone_current = (breloc->iter.zone_current + 1) % num_zones;
 }
 static int

@@ -332,15 +332,15 @@ ftl_reloc_lbk_valid(struct ftl_band_reloc *breloc, size_t lbkoff)
 static int
 ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
 {
-	size_t chunk = breloc->iter.chk_current;
+	size_t zone = breloc->iter.zone_current;
 	*lbkoff = ftl_reloc_iter_lbkoff(breloc);
-	if (ftl_reloc_iter_chk_done(breloc)) {
+	if (ftl_reloc_iter_zone_done(breloc)) {
 		return 0;
 	}
-	breloc->iter.chk_offset[chunk]++;
+	breloc->iter.zone_offset[zone]++;
 	if (!ftl_reloc_lbk_valid(breloc, *lbkoff)) {
 		ftl_reloc_clr_lbk(breloc, *lbkoff);

@@ -353,9 +353,9 @@ ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
 static int
 ftl_reloc_first_valid_lbk(struct ftl_band_reloc *breloc, size_t *lbkoff)
 {
-	size_t i, num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev);
+	size_t i, num_lbks = ftl_dev_lbks_in_zone(breloc->parent->dev);
-	for (i = ftl_reloc_iter_chk_offset(breloc); i < num_lbks; ++i) {
+	for (i = ftl_reloc_iter_zone_offset(breloc); i < num_lbks; ++i) {
 		if (ftl_reloc_iter_next(breloc, lbkoff)) {
 			return 1;
 		}

@@ -368,11 +368,11 @@ static int
 ftl_reloc_iter_done(struct ftl_band_reloc *breloc)
 {
 	size_t i;
-	size_t num_chks = ftl_dev_num_punits(breloc->band->dev);
-	size_t num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev);
+	size_t num_zones = ftl_dev_num_punits(breloc->band->dev);
+	size_t num_lbks = ftl_dev_lbks_in_zone(breloc->parent->dev);
-	for (i = 0; i < num_chks; ++i) {
-		if (breloc->iter.chk_offset[i] != num_lbks) {
+	for (i = 0; i < num_zones; ++i) {
+		if (breloc->iter.zone_offset[i] != num_lbks) {
 			return 0;
 		}
 	}

@@ -409,7 +409,7 @@ ftl_reloc_next_lbks(struct ftl_band_reloc *breloc, struct ftl_ppa *ppa)
 	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
 		lbk_cnt = ftl_reloc_find_valid_lbks(breloc, breloc->parent->xfer_size, ppa);
-		ftl_reloc_iter_next_chk(breloc);
+		ftl_reloc_iter_next_zone(breloc);
 		if (lbk_cnt || ftl_reloc_iter_done(breloc)) {
 			break;

@@ -617,9 +617,9 @@ ftl_band_reloc_init(struct ftl_reloc *reloc, struct ftl_band_reloc *breloc,
 		return -1;
 	}
-	breloc->iter.chk_offset = calloc(ftl_dev_num_punits(band->dev),
-					 sizeof(*breloc->iter.chk_offset));
-	if (!breloc->iter.chk_offset) {
+	breloc->iter.zone_offset = calloc(ftl_dev_num_punits(band->dev),
+					  sizeof(*breloc->iter.zone_offset));
+	if (!breloc->iter.zone_offset) {
 		SPDK_ERRLOG("Failed to initialize reloc iterator");
 		return -1;
 	}

@@ -666,7 +666,7 @@ ftl_band_reloc_free(struct ftl_band_reloc *breloc)
 	spdk_ring_free(breloc->move_queue);
 	spdk_bit_array_free(&breloc->reloc_map);
-	free(breloc->iter.chk_offset);
+	free(breloc->iter.zone_offset);
 	free(breloc->moves);
 }

@@ -137,7 +137,7 @@ struct ftl_restore {
 static int
 ftl_restore_tail_md(struct ftl_restore_band *rband);
 static void
-ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status);
+ftl_pad_zone_cb(struct ftl_io *io, void *arg, int status);
 static void
 ftl_restore_pad_band(struct ftl_restore_band *rband);

@@ -341,7 +341,7 @@ ftl_restore_head_md(void *ctx)
 		lba_map->dma_buf = restore->md_buf + i * ftl_head_md_num_lbks(dev) * FTL_BLOCK_SIZE;
 		if (ftl_band_read_head_md(rband->band, ftl_restore_head_cb, rband)) {
-			if (spdk_likely(rband->band->num_chunks)) {
+			if (spdk_likely(rband->band->num_zones)) {
 				SPDK_ERRLOG("Failed to read metadata on band %zu\n", i);
 				rband->md_status = FTL_MD_INVALID_CRC;

@@ -419,7 +419,7 @@ ftl_restore_next_band(struct ftl_restore *restore)
 	for (; restore->current < ftl_dev_num_bands(restore->dev); ++restore->current) {
 		rband = &restore->bands[restore->current];
-		if (spdk_likely(rband->band->num_chunks) &&
+		if (spdk_likely(rband->band->num_zones) &&
 		    rband->md_status == FTL_MD_SUCCESS) {
 			restore->current++;
 			return rband;

@@ -1068,11 +1068,11 @@ ftl_restore_nv_cache(struct ftl_restore *restore, ftl_restore_fn cb)
 }
 static bool
-ftl_pad_chunk_pad_finish(struct ftl_restore_band *rband, bool direct_access)
+ftl_pad_zone_pad_finish(struct ftl_restore_band *rband, bool direct_access)
 {
 	struct ftl_restore *restore = rband->parent;
 	struct ftl_restore_band *next_band;
-	size_t i, num_pad_chunks = 0;
+	size_t i, num_pad_zones = 0;
 	if (spdk_unlikely(restore->pad_status && !restore->num_ios)) {
 		if (direct_access) {

@@ -1086,14 +1086,14 @@ ftl_pad_chunk_pad_finish(struct ftl_restore_band *rband, bool direct_access)
 		return true;
 	}
-	for (i = 0; i < rband->band->num_chunks; ++i) {
-		if (rband->band->chunk_buf[i].state != FTL_CHUNK_STATE_CLOSED) {
-			num_pad_chunks++;
+	for (i = 0; i < rband->band->num_zones; ++i) {
+		if (rband->band->zone_buf[i].state != SPDK_BDEV_ZONE_STATE_CLOSED) {
+			num_pad_zones++;
 		}
 	}
-	/* Finished all chunks in a band, check if all bands are done */
-	if (num_pad_chunks == 0) {
+	/* Finished all zones in a band, check if all bands are done */
+	if (num_pad_zones == 0) {
 		if (direct_access) {
 			rband->band->state = FTL_BAND_STATE_CLOSED;
 			ftl_band_set_direct_access(rband->band, false);

@@ -1130,7 +1130,7 @@ ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
 		.flags = flags,
 		.type = FTL_IO_WRITE,
 		.lbk_cnt = dev->xfer_size,
-		.cb_fn = ftl_pad_chunk_cb,
+		.cb_fn = ftl_pad_zone_cb,
 		.cb_ctx = rband,
 		.data = buffer,
 		.parent = NULL,

@@ -1149,12 +1149,12 @@ ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
 }
 static void
-ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status)
+ftl_pad_zone_cb(struct ftl_io *io, void *arg, int status)
 {
 	struct ftl_restore_band *rband = arg;
 	struct ftl_restore *restore = rband->parent;
 	struct ftl_band *band = io->band;
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	struct ftl_io *new_io;
 	restore->num_ios--;

@@ -1165,8 +1165,8 @@ ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status)
 	}
 	if (io->ppa.lbk + io->lbk_cnt == band->dev->geo.clba) {
-		chunk = ftl_band_chunk_from_ppa(band, io->ppa);
-		chunk->state = FTL_CHUNK_STATE_CLOSED;
+		zone = ftl_band_zone_from_ppa(band, io->ppa);
+		zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
 	} else {
 		struct ftl_ppa ppa = io->ppa;
 		ppa.lbk += io->lbk_cnt;

@@ -1182,7 +1182,7 @@ ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status)
 end:
 	spdk_dma_free(io->iov[0].iov_base);
-	ftl_pad_chunk_pad_finish(rband, true);
+	ftl_pad_zone_pad_finish(rband, true);
 }
 static void

@@ -1198,8 +1198,8 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
 	size_t i;
 	int rc = 0;
-	/* Check if some chunks are not closed */
-	if (ftl_pad_chunk_pad_finish(rband, false)) {
+	/* Check if some zones are not closed */
+	if (ftl_pad_zone_pad_finish(rband, false)) {
 		/*
 		 * If we're here, end meta wasn't recognized, but the whole band is written
 		 * Assume the band was padded and ignore it

@@ -1214,16 +1214,16 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
 		return;
 	}
-	for (i = 0; i < band->num_chunks; ++i) {
-		if (band->chunk_buf[i].state == FTL_CHUNK_STATE_CLOSED) {
+	for (i = 0; i < band->num_zones; ++i) {
+		if (band->zone_buf[i].state == SPDK_BDEV_ZONE_STATE_CLOSED) {
 			continue;
 		}
-		rc = ftl_retrieve_chunk_info(dev, band->chunk_buf[i].start_ppa, &info, 1);
+		rc = ftl_retrieve_chunk_info(dev, band->zone_buf[i].start_ppa, &info, 1);
 		if (spdk_unlikely(rc)) {
 			goto error;
 		}
-		ppa = band->chunk_buf[i].start_ppa;
+		ppa = band->zone_buf[i].start_ppa;
 		ppa.lbk = info.wp;
 		buffer = spdk_dma_zmalloc(FTL_BLOCK_SIZE * dev->xfer_size, 0, NULL);

@@ -1246,7 +1246,7 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
 error:
 	restore->pad_status = rc;
-	ftl_pad_chunk_pad_finish(rband, true);
+	ftl_pad_zone_pad_finish(rband, true);
 }
 static void

@@ -474,8 +474,8 @@ bdev_ftl_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
 	spdk_json_write_named_object_begin(w, "ftl");
 	_bdev_ftl_write_config_info(ftl_bdev, w);
-	spdk_json_write_named_string_fmt(w, "num_chunks", "%zu", attrs.num_chunks);
-	spdk_json_write_named_string_fmt(w, "chunk_size", "%zu", attrs.chunk_size);
+	spdk_json_write_named_string_fmt(w, "num_zones", "%zu", attrs.num_zones);
+	spdk_json_write_named_string_fmt(w, "zone_size", "%zu", attrs.zone_size);
 	/* ftl */
 	spdk_json_write_object_end(w);

@@ -87,7 +87,7 @@ struct ftl_band *
 test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
 {
 	struct ftl_band *band;
-	struct ftl_chunk *chunk;
+	struct ftl_zone *zone;
 	SPDK_CU_ASSERT_FATAL(dev != NULL);
 	SPDK_CU_ASSERT_FATAL(id < dev->geo.num_chk);

@@ -98,26 +98,26 @@ test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
 	band->state = FTL_BAND_STATE_CLOSED;
 	LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
-	CIRCLEQ_INIT(&band->chunks);
+	CIRCLEQ_INIT(&band->zones);
 	band->lba_map.vld = spdk_bit_array_create(ftl_num_band_lbks(dev));
 	SPDK_CU_ASSERT_FATAL(band->lba_map.vld != NULL);
-	band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
-	SPDK_CU_ASSERT_FATAL(band->chunk_buf != NULL);
+	band->zone_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->zone_buf));
+	SPDK_CU_ASSERT_FATAL(band->zone_buf != NULL);
 	band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
 	SPDK_CU_ASSERT_FATAL(band->reloc_bitmap != NULL);
 	for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) {
-		chunk = &band->chunk_buf[i];
-		chunk->pos = i;
-		chunk->state = FTL_CHUNK_STATE_CLOSED;
-		chunk->punit = &dev->punits[i];
-		chunk->start_ppa = dev->punits[i].start_ppa;
-		chunk->start_ppa.chk = band->id;
-		CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
-		band->num_chunks++;
+		zone = &band->zone_buf[i];
+		zone->pos = i;
+		zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
+		zone->punit = &dev->punits[i];
+		zone->start_ppa = dev->punits[i].start_ppa;
+		zone->start_ppa.chk = band->id;
+		CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
+		band->num_zones++;
 	}
 	pthread_spin_init(&band->lba_map.lock, PTHREAD_PROCESS_PRIVATE);

@@ -143,7 +143,7 @@ test_free_ftl_band(struct ftl_band *band)
 	SPDK_CU_ASSERT_FATAL(band != NULL);
 	spdk_bit_array_free(&band->lba_map.vld);
 	spdk_bit_array_free(&band->reloc_bitmap);
-	free(band->chunk_buf);
+	free(band->zone_buf);
 	spdk_dma_free(band->lba_map.dma_buf);
 }

@@ -157,5 +157,5 @@ test_offset_from_ppa(struct ftl_ppa ppa, struct ftl_band *band)
 	punit = ftl_ppa_flatten_punit(dev, ppa);
 	CU_ASSERT_EQUAL(ppa.chk, band->id);
-	return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
+	return punit * ftl_dev_lbks_in_zone(dev) + ppa.lbk;
 }

@@ -100,7 +100,7 @@ test_band_lbkoff_from_ppa_base(void)
 		ppa.chk = TEST_BAND_IDX;
 		offset = ftl_band_lbkoff_from_ppa(g_band, ppa);
-		CU_ASSERT_EQUAL(offset, flat_lun * ftl_dev_lbks_in_chunk(g_dev));
+		CU_ASSERT_EQUAL(offset, flat_lun * ftl_dev_lbks_in_zone(g_dev));
 		flat_lun++;
 	}
 	cleanup_band();

@@ -234,7 +234,7 @@ test_next_xfer_ppa(void)
 	result = ftl_band_next_xfer_ppa(g_band, ppa, 1);
 	CU_ASSERT_EQUAL(result.ppa, expect.ppa);
-	/* Verify jumping between chunks */
+	/* Verify jumping between zones */
 	expect = ppa_from_punit(g_range.begin + 1);
 	expect.chk = TEST_BAND_IDX;
 	result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size);

@@ -247,7 +247,7 @@ test_next_xfer_ppa(void)
 	result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size + 3);
 	CU_ASSERT_EQUAL(result.ppa, expect.ppa);
-	/* Verify jumping from last chunk to the first one */
+	/* Verify jumping from last zone to the first one */
 	expect = ppa_from_punit(g_range.begin);
 	expect.chk = TEST_BAND_IDX;
 	expect.lbk = g_dev->xfer_size;

@@ -256,7 +256,7 @@ test_next_xfer_ppa(void)
 	result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size);
 	CU_ASSERT_EQUAL(result.ppa, expect.ppa);
-	/* Verify jumping from last chunk to the first one with unaligned offset */
+	/* Verify jumping from last zone to the first one with unaligned offset */
 	expect = ppa_from_punit(g_range.begin);
 	expect.chk = TEST_BAND_IDX;
 	expect.lbk = g_dev->xfer_size + 2;

@@ -276,10 +276,10 @@ test_next_xfer_ppa(void)
 				       ftl_dev_num_punits(g_dev) + 3);
 	CU_ASSERT_EQUAL(result.ppa, expect.ppa);
-	/* Remove one chunk and verify it's skipped properly */
-	g_band->chunk_buf[1].state = FTL_CHUNK_STATE_BAD;
-	CIRCLEQ_REMOVE(&g_band->chunks, &g_band->chunk_buf[1], circleq);
-	g_band->num_chunks--;
+	/* Remove one zone and verify it's skipped properly */
+	g_band->zone_buf[1].state = SPDK_BDEV_ZONE_STATE_OFFLINE;
+	CIRCLEQ_REMOVE(&g_band->zones, &g_band->zone_buf[1], circleq);
+	g_band->num_zones--;
 	expect = ppa_from_punit(g_range.begin + 2);
 	expect.chk = TEST_BAND_IDX;
 	expect.lbk = g_dev->xfer_size * 5 + 4;

@@ -116,9 +116,9 @@ ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
 	struct spdk_ftl_dev *dev = band->dev;
 	uint64_t punit;
-	punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin;
+	punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin;
-	ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev);
+	ppa.lbk = lbkoff % ftl_dev_lbks_in_zone(dev);
 	ppa.chk = band->id;
 	ppa.pu = punit / dev->geo.num_grp;
 	ppa.grp = punit % dev->geo.num_grp;

@@ -283,7 +283,7 @@ test_reloc_iter_full(void)
 	CU_ASSERT_EQUAL(breloc->num_lbks, ftl_num_band_lbks(dev));
 	num_iters = ftl_dev_num_punits(dev) *
-		    (ftl_dev_lbks_in_chunk(dev) / reloc->xfer_size);
+		    (ftl_dev_lbks_in_zone(dev) / reloc->xfer_size);
 	for (i = 0; i < num_iters; i++) {
 		num_lbks = ftl_reloc_next_lbks(breloc, &ppa);

@@ -293,10 +293,10 @@ test_reloc_iter_full(void)
 	num_iters = ftl_dev_num_punits(dev);
 	/* ftl_reloc_next_lbks is searching for maximum xfer_size */
-	/* contiguous valid logic blocks in chunk, so we can end up */
-	/* with some reminder if number of logical blocks in chunk */
+	/* contiguous valid logic blocks in zone, so we can end up */
+	/* with some reminder if number of logical blocks in zone */
 	/* is not divisible by xfer_size */
-	reminder = ftl_dev_lbks_in_chunk(dev) % reloc->xfer_size;
+	reminder = ftl_dev_lbks_in_zone(dev) % reloc->xfer_size;
 	for (i = 0; i < num_iters; i++) {
 		num_lbks = ftl_reloc_next_lbks(breloc, &ppa);
 		CU_ASSERT_EQUAL(reminder, num_lbks);

@@ -412,7 +412,7 @@ test_reloc_scatter_band(void)
 }
 static void
-test_reloc_chunk(void)
+test_reloc_zone(void)
 {
 	struct spdk_ftl_dev *dev;
 	struct ftl_reloc *reloc;

@@ -428,26 +428,24 @@ test_reloc_chunk(void)
 	band->high_prio = 1;
 	ftl_band_alloc_lba_map(band);
 	num_io = MAX_RELOC_QDEPTH * reloc->xfer_size;
-	num_iters = ftl_dev_lbks_in_chunk(dev) / num_io;
+	num_iters = ftl_dev_lbks_in_zone(dev) / num_io;
 	set_band_valid_map(band, 0, ftl_num_band_lbks(dev));
-	ftl_reloc_add(reloc, band, ftl_dev_lbks_in_chunk(dev) * 3,
-		      ftl_dev_lbks_in_chunk(dev), 1, false);
-	ftl_reloc_prep(breloc);
+	ftl_reloc_add(reloc, band, ftl_dev_lbks_in_zone(dev) * 3,
+		      ftl_dev_lbks_in_zone(dev), 1, false);
 	add_to_active_queue(reloc, breloc);
-	CU_ASSERT_EQUAL(breloc->num_lbks, ftl_dev_lbks_in_chunk(dev));
+	CU_ASSERT_EQUAL(breloc->num_lbks, ftl_dev_lbks_in_zone(dev));
 	for (i = 1; i <= num_iters ; ++i) {
 		single_reloc_move(breloc);
-		num_lbk = ftl_dev_lbks_in_chunk(dev) - (i * num_io);
+		num_lbk = ftl_dev_lbks_in_zone(dev) - (i * num_io);
 		CU_ASSERT_EQUAL(breloc->num_lbks, num_lbk);
 	}
-	/* In case num_lbks_in_chunk % num_io != 0 one extra iteration is needed */
+	/* In case num_lbks_in_zone % num_io != 0 one extra iteration is needed */
 	single_reloc_move(breloc);
 	/* Drain move queue */
 	ftl_reloc_process_moves(breloc);

@@ -518,8 +516,8 @@ main(int argc, char **argv)
 			test_reloc_full_band) == NULL
 		|| CU_add_test(suite, "test_reloc_scatter_band",
 			       test_reloc_scatter_band) == NULL
-		|| CU_add_test(suite, "test_reloc_chunk",
-			       test_reloc_chunk) == NULL
+		|| CU_add_test(suite, "test_reloc_zone",
+			       test_reloc_zone) == NULL
 		|| CU_add_test(suite, "test_reloc_single_lbk",
 			       test_reloc_single_lbk) == NULL
 	) {

@@ -147,7 +147,7 @@ test_wptr(void)
 	struct ftl_band *band;
 	struct ftl_io io = { 0 };
 	size_t xfer_size;
-	size_t chunk, lbk, offset, i;
+	size_t zone, lbk, offset, i;
 	int rc;
 	setup_wptr_test(&dev, &g_geo, &g_range);

@@ -162,8 +162,8 @@ test_wptr(void)
 	io.band = band;
 	io.dev = dev;
-	for (lbk = 0, offset = 0; lbk < ftl_dev_lbks_in_chunk(dev) / xfer_size; ++lbk) {
-		for (chunk = 0; chunk < band->num_chunks; ++chunk) {
+	for (lbk = 0, offset = 0; lbk < ftl_dev_lbks_in_zone(dev) / xfer_size; ++lbk) {
+		for (zone = 0; zone < band->num_zones; ++zone) {
 			CU_ASSERT_EQUAL(wptr->ppa.lbk, (lbk * xfer_size));
 			CU_ASSERT_EQUAL(wptr->offset, offset);
 			ftl_wptr_advance(wptr, xfer_size);

@@ -172,7 +172,7 @@ test_wptr(void)
 	}
 	CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL);
-	CU_ASSERT_EQUAL(wptr->ppa.lbk, ftl_dev_lbks_in_chunk(dev));
+	CU_ASSERT_EQUAL(wptr->ppa.lbk, ftl_dev_lbks_in_zone(dev));
 	ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);